Commit bc0247a4 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-01-22

This series contains updates to e1000, e1000e, igb, fm10k and virtio_net.

Asaf Vertz provides a fix for e1000 to future-proof the time comparisons
by using time_after_eq() instead of plain math.
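
For reference, the pattern looks roughly like this (a minimal sketch with
a hypothetical function name, not taken from the patch itself);
time_after_eq() is safe across jiffies wraparound, while a plain ">="
comparison is not:

    #include <linux/jiffies.h>

    /* True once the deadline has passed.  Correct even when jiffies
     * wraps around, unlike "jiffies >= start + timeout".
     */
    static bool example_timed_out(unsigned long start, unsigned long timeout)
    {
            return time_after_eq(jiffies, start + timeout);
    }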

Mathias Koehrer provides a fix for e1000e to add a check to
e1000_xmit_frame() to ensure a work queue that has not been initialized
will not be scheduled.

Jacob adds software timestamping support to the virtio_net driver.
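
In outline, software timestamping means calling skb_tx_timestamp() on the
transmit path and advertising the capability through ethtool (a condensed
sketch with hypothetical names; the real hunks are in
drivers/net/virtio_net.c below):

    static netdev_tx_t example_start_xmit(struct sk_buff *skb,
                                          struct net_device *dev)
    {
            /* report a software TX timestamp to interested sockets */
            skb_tx_timestamp(skb);
            /* ... hand the skb to the device queue ... */
            return NETDEV_TX_OK;
    }

    static const struct ethtool_ops example_ethtool_ops = {
            /* advertises SOF_TIMESTAMPING_TX_SOFTWARE and friends */
            .get_ts_info = ethtool_op_get_ts_info,
    };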

Alex Duyck cleans up the page reuse code in igb and fm10k, bringing it to
a state where all the needed workarounds are in place, and fixing
oversights such as using __free_page() instead of put_page() to drop a
locally allocated page.
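
The reuse test both drivers converge on reads roughly as follows (a
sketch of the helper added below; _count and pfmemalloc are fields of
struct page in this kernel generation):

    /* a page must not be recycled if it is remote to this NUMA node
     * or was taken from the emergency pfmemalloc reserves
     */
    static inline bool example_page_is_reserved(struct page *page)
    {
            return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
    }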

Richard Cochran provides 4 patches for igb dealing with time sync.  The
first adds a helper function, since the code that handles the time sync
interrupt is repeated in three different places.  The second serializes
access to the time sync interrupt, since the registers may be manipulated
from different contexts.  The third enables use of the i210 device
interrupt to generate an internal PPS event for adjusting the kernel
system time.  The i210 device offers a number of special PTP hardware
clock features on its Software Defined Pins (SDPs), so the fourth adds
support for two of the possible functions (time stamping external events
and periodic output signals).
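
For context, the PPS event reaches the PTP core from the interrupt
handler roughly like this (condensed from igb_tsync_interrupt() below):

    struct ptp_clock_event event;

    if (tsicr & TSINTR_SYS_WRAP) {
            event.type = PTP_CLOCK_PPS;
            ptp_clock_event(adapter->ptp_clock, &event);
    }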

Or Gerlitz fixes fm10k to not set NETIF_F_SG twice, since the networking
core already sets it for the driver at registration time.

Joe Stringer adds support for up to 184 bytes of inner+outer headers in
fm10k and adds an initial check to fail encapsulation offload if the
headers are too large.
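
The check boils down to comparing the inner transport header offset
against that limit (condensed from fm10k_tx_encap_offload() below):

    /* 184 octets of outer+inner headers, minus 20 for the inner L4 */
    #define FM10K_MAX_ENCAP_TRANSPORT_OFFSET        164

    if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
        FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
            return 0;       /* cannot offload this frame */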

Matthew increases the timeout for the data path reset based on feedback
from the hardware team, since 100us is too short a time to wait for the
data path reset to complete.

Alexander Graf provides a fix for igb to indicate failure on VF reset
for an empty MAC address, to mirror the behavior of ixgbe.

Florian Westphal updates e1000 and e1000e to delay the Tx tail descriptor
(TDT) update via xmit_more; this way we won't update the Tx tail
descriptor if the queue has not been stopped and we know at least one
more skb will be sent right away.
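
Condensed, the tail bump in both drivers is now gated like this (see the
e1000 hunk below; e1000e additionally goes through its PCIm2PCI
arbiter workaround):

    /* skip the expensive MMIO write when the stack has announced more
     * skbs (xmit_more) and the queue is still running
     */
    if (!skb->xmit_more ||
        netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
            writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
            mmiowb();
    }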
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 86b368b4 472f31f5
drivers/net/ethernet/intel/e1000/e1000_ethtool.c
@@ -24,6 +24,7 @@
 /* ethtool support for e1000 */
 #include "e1000.h"
+#include <linux/jiffies.h>
 #include <linux/uaccess.h>
 enum {NETDEV_STATS, E1000_STATS};
@@ -1460,7 +1461,7 @@ static int e1000_run_loopback_test(struct e1000_adapter *adapter)
 			ret_val = 13; /* ret_val is the same as mis-compare */
 			break;
 		}
-		if (jiffies >= (time + 2)) {
+		if (time_after_eq(jiffies, time + 2)) {
 			ret_val = 14; /* error code for time out error */
 			break;
 		}
...
drivers/net/ethernet/intel/e1000/e1000_main.c
@@ -2977,7 +2977,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 			   struct e1000_tx_ring *tx_ring, int tx_flags,
 			   int count)
 {
-	struct e1000_hw *hw = &adapter->hw;
 	struct e1000_tx_desc *tx_desc = NULL;
 	struct e1000_tx_buffer *buffer_info;
 	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
@@ -3031,11 +3030,6 @@ static void e1000_tx_queue(struct e1000_adapter *adapter,
 	wmb();
 	tx_ring->next_to_use = i;
-	writel(i, hw->hw_addr + tx_ring->tdt);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
 }
 /* 82547 workaround to avoid controller hang in half-duplex environment.
@@ -3264,6 +3258,15 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 		/* Make sure there is space in the ring for the next send. */
 		e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+		if (!skb->xmit_more ||
+		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+			writel(tx_ring->next_to_use, hw->hw_addr + tx_ring->tdt);
+			/* we need this if more than one processor can write to
+			 * our tail at a time, it synchronizes IO on IA64/Altix
+			 * systems
+			 */
+			mmiowb();
+		}
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
...
drivers/net/ethernet/intel/e1000e/netdev.c
@@ -5444,16 +5444,6 @@ static void e1000_tx_queue(struct e1000_ring *tx_ring, int tx_flags, int count)
 	wmb();
 	tx_ring->next_to_use = i;
-	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
-		e1000e_update_tdt_wa(tx_ring, i);
-	else
-		writel(i, tx_ring->tail);
-	/* we need this if more than one processor can write to our tail
-	 * at a time, it synchronizes IO on IA64/Altix systems
-	 */
-	mmiowb();
 }
 #define MINIMUM_DHCP_PACKET_SIZE 282
@@ -5636,8 +5626,9 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 	count = e1000_tx_map(tx_ring, skb, first, adapter->tx_fifo_limit,
 			     nr_frags);
 	if (count) {
-		if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
-			     !adapter->tx_hwtstamp_skb)) {
+		if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) &&
+		    (adapter->flags & FLAG_HAS_HW_TIMESTAMP) &&
+		    !adapter->tx_hwtstamp_skb) {
 			skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
 			tx_flags |= E1000_TX_FLAGS_HWTSTAMP;
 			adapter->tx_hwtstamp_skb = skb_get(skb);
@@ -5654,6 +5645,21 @@ static netdev_tx_t e1000_xmit_frame(struct sk_buff *skb,
 				     (MAX_SKB_FRAGS *
 				      DIV_ROUND_UP(PAGE_SIZE,
 						   adapter->tx_fifo_limit) + 2));
+		if (!skb->xmit_more ||
+		    netif_xmit_stopped(netdev_get_tx_queue(netdev, 0))) {
+			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+				e1000e_update_tdt_wa(tx_ring,
+						     tx_ring->next_to_use);
+			else
+				writel(tx_ring->next_to_use, tx_ring->tail);
+			/* we need this if more than one processor can write
+			 * to our tail at a time, it synchronizes IO on
+			 * IA64/Altix systems
+			 */
+			mmiowb();
+		}
 	} else {
 		dev_kfree_skb_any(skb);
 		tx_ring->buffer_info[first].time_stamp = 0;
...
drivers/net/ethernet/intel/fm10k/fm10k_main.c
@@ -97,7 +97,6 @@ static bool fm10k_alloc_mapped_page(struct fm10k_ring *rx_ring,
 	 */
 	if (dma_mapping_error(rx_ring->dev, dma)) {
 		__free_page(page);
-		bi->page = NULL;
 		rx_ring->rx_stats.alloc_failed++;
 		return false;
@@ -147,8 +146,8 @@ void fm10k_alloc_rx_buffers(struct fm10k_ring *rx_ring, u16 cleaned_count)
 			i -= rx_ring->count;
 		}
-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->q.hdr_addr = 0;
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->d.staterr = 0;
 		cleaned_count--;
 	} while (cleaned_count);
@@ -194,7 +193,7 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
 	rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0;
 	/* transfer page from old buffer to new buffer */
-	memcpy(new_buff, old_buff, sizeof(struct fm10k_rx_buffer));
+	*new_buff = *old_buff;
 	/* sync the buffer for use by the device */
 	dma_sync_single_range_for_device(rx_ring->dev, old_buff->dma,
@@ -203,12 +202,17 @@ static void fm10k_reuse_rx_page(struct fm10k_ring *rx_ring,
 					 DMA_FROM_DEVICE);
 }
+static inline bool fm10k_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 				    struct page *page,
 				    unsigned int truesize)
 {
 	/* avoid re-using remote pages */
-	if (unlikely(page_to_nid(page) != numa_mem_id()))
+	if (unlikely(fm10k_page_is_reserved(page)))
 		return false;
 #if (PAGE_SIZE < 8192)
@@ -218,22 +222,19 @@ static bool fm10k_can_reuse_rx_page(struct fm10k_rx_buffer *rx_buffer,
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= FM10K_RX_BUFSZ;
-
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 	if (rx_buffer->page_offset > (PAGE_SIZE - FM10K_RX_BUFSZ))
 		return false;
-
-	/* bump ref count on page before it is given to the stack */
-	get_page(page);
 #endif
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
+	 */
+	atomic_inc(&page->_count);
+
 	return true;
 }
@@ -270,12 +271,12 @@ static bool fm10k_add_rx_frag(struct fm10k_ring *rx_ring,
 	memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-	/* we can reuse buffer as-is, just make sure it is local */
-	if (likely(page_to_nid(page) == numa_mem_id()))
+	/* page is not reserved, we can reuse buffer as-is */
+	if (likely(!fm10k_page_is_reserved(page)))
 		return true;
 	/* this page cannot be reused so discard it */
-	put_page(page);
+	__free_page(page);
 	return false;
 }
@@ -293,7 +294,6 @@ static struct sk_buff *fm10k_fetch_rx_buffer(struct fm10k_ring *rx_ring,
 	struct page *page;
 	rx_buffer = &rx_ring->rx_buffer[rx_ring->next_to_clean];
-
 	page = rx_buffer->page;
 	prefetchw(page);
@@ -727,6 +727,12 @@ static __be16 fm10k_tx_encap_offload(struct sk_buff *skb)
 	struct ethhdr *eth_hdr;
 	u8 l4_hdr = 0;
+/* fm10k supports 184 octets of outer+inner headers. Minus 20 for inner L4. */
+#define FM10K_MAX_ENCAP_TRANSPORT_OFFSET	164
+	if (skb_inner_transport_header(skb) - skb_mac_header(skb) >
+	    FM10K_MAX_ENCAP_TRANSPORT_OFFSET)
+		return 0;
+
 	switch (vlan_get_protocol(skb)) {
 	case htons(ETH_P_IP):
 		l4_hdr = ip_hdr(skb)->protocol;
...
drivers/net/ethernet/intel/fm10k/fm10k_netdev.c
@@ -1414,13 +1414,12 @@ struct net_device *fm10k_alloc_netdev(void)
 	dev->vlan_features |= dev->features;
 	/* configure tunnel offloads */
-	dev->hw_enc_features = NETIF_F_IP_CSUM |
-			       NETIF_F_TSO |
-			       NETIF_F_TSO6 |
-			       NETIF_F_TSO_ECN |
-			       NETIF_F_GSO_UDP_TUNNEL |
-			       NETIF_F_IPV6_CSUM |
-			       NETIF_F_SG;
+	dev->hw_enc_features |= NETIF_F_IP_CSUM |
+				NETIF_F_TSO |
+				NETIF_F_TSO6 |
+				NETIF_F_TSO_ECN |
+				NETIF_F_GSO_UDP_TUNNEL |
+				NETIF_F_IPV6_CSUM;
 	/* we want to leave these both on as we cannot disable VLAN tag
 	 * insertion or stripping on the hardware since it is contained
...
drivers/net/ethernet/intel/fm10k/fm10k_type.h
@@ -354,7 +354,7 @@ struct fm10k_hw;
 /* Define timeouts for resets and disables */
 #define FM10K_QUEUE_DISABLE_TIMEOUT	100
-#define FM10K_RESET_TIMEOUT		100
+#define FM10K_RESET_TIMEOUT		150
 /* VF registers */
 #define FM10K_VFCTRL		0x00000
...
drivers/net/ethernet/intel/igb/igb.h
@@ -343,6 +343,9 @@ struct hwmon_buff {
 };
 #endif
+#define IGB_N_EXTTS	2
+#define IGB_N_PEROUT	2
+#define IGB_N_SDP	4
 #define IGB_RETA_SIZE	128
 /* board specific private data structure */
@@ -439,6 +442,12 @@ struct igb_adapter {
 	u32 tx_hwtstamp_timeouts;
 	u32 rx_hwtstamp_cleared;
+	struct ptp_pin_desc sdp_config[IGB_N_SDP];
+	struct {
+		struct timespec start;
+		struct timespec period;
+	} perout[IGB_N_PEROUT];
+
 	char fw_version[32];
 #ifdef CONFIG_IGB_HWMON
 	struct hwmon_buff *igb_hwmon_buff;
...
drivers/net/ethernet/intel/igb/igb_main.c
@@ -5384,6 +5384,80 @@ void igb_update_stats(struct igb_adapter *adapter,
 	}
 }
+static void igb_tsync_interrupt(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct ptp_clock_event event;
+	struct timespec ts;
+	u32 ack = 0, tsauxc, sec, nsec, tsicr = rd32(E1000_TSICR);
+
+	if (tsicr & TSINTR_SYS_WRAP) {
+		event.type = PTP_CLOCK_PPS;
+		if (adapter->ptp_caps.pps)
+			ptp_clock_event(adapter->ptp_clock, &event);
+		else
+			dev_err(&adapter->pdev->dev, "unexpected SYS WRAP");
+		ack |= TSINTR_SYS_WRAP;
+	}
+
+	if (tsicr & E1000_TSICR_TXTS) {
+		/* retrieve hardware timestamp */
+		schedule_work(&adapter->ptp_tx_work);
+		ack |= E1000_TSICR_TXTS;
+	}
+
+	if (tsicr & TSINTR_TT0) {
+		spin_lock(&adapter->tmreg_lock);
+		ts = timespec_add(adapter->perout[0].start,
+				  adapter->perout[0].period);
+		wr32(E1000_TRGTTIML0, ts.tv_nsec);
+		wr32(E1000_TRGTTIMH0, ts.tv_sec);
+		tsauxc = rd32(E1000_TSAUXC);
+		tsauxc |= TSAUXC_EN_TT0;
+		wr32(E1000_TSAUXC, tsauxc);
+		adapter->perout[0].start = ts;
+		spin_unlock(&adapter->tmreg_lock);
+		ack |= TSINTR_TT0;
+	}
+
+	if (tsicr & TSINTR_TT1) {
+		spin_lock(&adapter->tmreg_lock);
+		ts = timespec_add(adapter->perout[1].start,
+				  adapter->perout[1].period);
+		wr32(E1000_TRGTTIML1, ts.tv_nsec);
+		wr32(E1000_TRGTTIMH1, ts.tv_sec);
+		tsauxc = rd32(E1000_TSAUXC);
+		tsauxc |= TSAUXC_EN_TT1;
+		wr32(E1000_TSAUXC, tsauxc);
+		adapter->perout[1].start = ts;
+		spin_unlock(&adapter->tmreg_lock);
+		ack |= TSINTR_TT1;
+	}
+
+	if (tsicr & TSINTR_AUTT0) {
+		nsec = rd32(E1000_AUXSTMPL0);
+		sec  = rd32(E1000_AUXSTMPH0);
+		event.type = PTP_CLOCK_EXTTS;
+		event.index = 0;
+		event.timestamp = sec * 1000000000ULL + nsec;
+		ptp_clock_event(adapter->ptp_clock, &event);
+		ack |= TSINTR_AUTT0;
+	}
+
+	if (tsicr & TSINTR_AUTT1) {
+		nsec = rd32(E1000_AUXSTMPL1);
+		sec  = rd32(E1000_AUXSTMPH1);
+		event.type = PTP_CLOCK_EXTTS;
+		event.index = 1;
+		event.timestamp = sec * 1000000000ULL + nsec;
+		ptp_clock_event(adapter->ptp_clock, &event);
+		ack |= TSINTR_AUTT1;
+	}
+
+	/* acknowledge the interrupts */
+	wr32(E1000_TSICR, ack);
+}
+
 static irqreturn_t igb_msix_other(int irq, void *data)
 {
 	struct igb_adapter *adapter = data;
@@ -5415,16 +5489,8 @@ static irqreturn_t igb_msix_other(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
-	if (icr & E1000_ICR_TS) {
-		u32 tsicr = rd32(E1000_TSICR);
-
-		if (tsicr & E1000_TSICR_TXTS) {
-			/* acknowledge the interrupt */
-			wr32(E1000_TSICR, E1000_TSICR_TXTS);
-			/* retrieve hardware timestamp */
-			schedule_work(&adapter->ptp_tx_work);
-		}
-	}
+	if (icr & E1000_ICR_TS)
+		igb_tsync_interrupt(adapter);
 	wr32(E1000_EIMS, adapter->eims_other);
@@ -6011,8 +6077,12 @@ static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
 	adapter->vf_data[vf].flags |= IGB_VF_FLAG_CTS;
 	/* reply to reset with ack and vf mac address */
-	msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
-	memcpy(addr, vf_mac, ETH_ALEN);
+	if (!is_zero_ether_addr(vf_mac)) {
+		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
+		memcpy(addr, vf_mac, ETH_ALEN);
+	} else {
+		msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_NACK;
+	}
 	igb_write_mbx(hw, msgbuf, 3, vf);
 }
@@ -6203,16 +6273,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
-	if (icr & E1000_ICR_TS) {
-		u32 tsicr = rd32(E1000_TSICR);
-
-		if (tsicr & E1000_TSICR_TXTS) {
-			/* acknowledge the interrupt */
-			wr32(E1000_TSICR, E1000_TSICR_TXTS);
-			/* retrieve hardware timestamp */
-			schedule_work(&adapter->ptp_tx_work);
-		}
-	}
+	if (icr & E1000_ICR_TS)
+		igb_tsync_interrupt(adapter);
 	napi_schedule(&q_vector->napi);
@@ -6257,16 +6319,8 @@ static irqreturn_t igb_intr(int irq, void *data)
 		mod_timer(&adapter->watchdog_timer, jiffies + 1);
 	}
-	if (icr & E1000_ICR_TS) {
-		u32 tsicr = rd32(E1000_TSICR);
-
-		if (tsicr & E1000_TSICR_TXTS) {
-			/* acknowledge the interrupt */
-			wr32(E1000_TSICR, E1000_TSICR_TXTS);
-			/* retrieve hardware timestamp */
-			schedule_work(&adapter->ptp_tx_work);
-		}
-	}
+	if (icr & E1000_ICR_TS)
+		igb_tsync_interrupt(adapter);
 	napi_schedule(&q_vector->napi);
@@ -6527,15 +6581,17 @@ static void igb_reuse_rx_page(struct igb_ring *rx_ring,
 					 DMA_FROM_DEVICE);
 }
+static inline bool igb_page_is_reserved(struct page *page)
+{
+	return (page_to_nid(page) != numa_mem_id()) || page->pfmemalloc;
+}
+
 static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 				  struct page *page,
 				  unsigned int truesize)
 {
 	/* avoid re-using remote pages */
-	if (unlikely(page_to_nid(page) != numa_node_id()))
-		return false;
-
-	if (unlikely(page->pfmemalloc))
+	if (unlikely(igb_page_is_reserved(page)))
 		return false;
 #if (PAGE_SIZE < 8192)
@@ -6545,22 +6601,19 @@ static bool igb_can_reuse_rx_page(struct igb_rx_buffer *rx_buffer,
 	/* flip page offset to other buffer */
 	rx_buffer->page_offset ^= IGB_RX_BUFSZ;
-
-	/* Even if we own the page, we are not allowed to use atomic_set()
-	 * This would break get_page_unless_zero() users.
-	 */
-	atomic_inc(&page->_count);
 #else
 	/* move offset up to the next cache line */
 	rx_buffer->page_offset += truesize;
 	if (rx_buffer->page_offset > (PAGE_SIZE - IGB_RX_BUFSZ))
 		return false;
-
-	/* bump ref count on page before it is given to the stack */
-	get_page(page);
 #endif
+	/* Even if we own the page, we are not allowed to use atomic_set()
+	 * This would break get_page_unless_zero() users.
	 */
+	atomic_inc(&page->_count);
+
 	return true;
 }
@@ -6603,13 +6656,12 @@ static bool igb_add_rx_frag(struct igb_ring *rx_ring,
 	memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long)));
-	/* we can reuse buffer as-is, just make sure it is local */
-	if (likely((page_to_nid(page) == numa_node_id()) &&
-		   !page->pfmemalloc))
+	/* page is not reserved, we can reuse buffer as-is */
+	if (likely(!igb_page_is_reserved(page)))
 		return true;
 	/* this page cannot be reused so discard it */
-	put_page(page);
+	__free_page(page);
 	return false;
 }
@@ -6627,7 +6679,6 @@ static struct sk_buff *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
 	struct page *page;
 	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
-
 	page = rx_buffer->page;
 	prefetchw(page);
@@ -7042,8 +7093,8 @@ void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
 			i -= rx_ring->count;
 		}
-		/* clear the hdr_addr for the next_to_use descriptor */
-		rx_desc->read.hdr_addr = 0;
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.upper.status_error = 0;
 		cleaned_count--;
 	} while (cleaned_count);
...
drivers/net/ethernet/intel/igb/igb_ptp.c
@@ -355,12 +355,239 @@ static int igb_ptp_settime_i210(struct ptp_clock_info *ptp,
 	return 0;
 }
+static void igb_pin_direction(int pin, int input, u32 *ctrl, u32 *ctrl_ext)
+{
+	u32 *ptr = pin < 2 ? ctrl : ctrl_ext;
+	u32 mask[IGB_N_SDP] = {
+		E1000_CTRL_SDP0_DIR,
+		E1000_CTRL_SDP1_DIR,
+		E1000_CTRL_EXT_SDP2_DIR,
+		E1000_CTRL_EXT_SDP3_DIR,
+	};
+
+	if (input)
+		*ptr &= ~mask[pin];
+	else
+		*ptr |= mask[pin];
+}
+
+static void igb_pin_extts(struct igb_adapter *igb, int chan, int pin)
+{
+	struct e1000_hw *hw = &igb->hw;
+	u32 aux0_sel_sdp[IGB_N_SDP] = {
+		AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
+	};
+	u32 aux1_sel_sdp[IGB_N_SDP] = {
+		AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
+	};
+	u32 ts_sdp_en[IGB_N_SDP] = {
+		TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
+	};
+	u32 ctrl, ctrl_ext, tssdp = 0;
+
+	ctrl = rd32(E1000_CTRL);
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	tssdp = rd32(E1000_TSSDP);
+
+	igb_pin_direction(pin, 1, &ctrl, &ctrl_ext);
+
+	/* Make sure this pin is not enabled as an output. */
+	tssdp &= ~ts_sdp_en[pin];
+
+	if (chan == 1) {
+		tssdp &= ~AUX1_SEL_SDP3;
+		tssdp |= aux1_sel_sdp[pin] | AUX1_TS_SDP_EN;
+	} else {
+		tssdp &= ~AUX0_SEL_SDP3;
+		tssdp |= aux0_sel_sdp[pin] | AUX0_TS_SDP_EN;
+	}
+
+	wr32(E1000_TSSDP, tssdp);
+	wr32(E1000_CTRL, ctrl);
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+static void igb_pin_perout(struct igb_adapter *igb, int chan, int pin)
+{
+	struct e1000_hw *hw = &igb->hw;
+	u32 aux0_sel_sdp[IGB_N_SDP] = {
+		AUX0_SEL_SDP0, AUX0_SEL_SDP1, AUX0_SEL_SDP2, AUX0_SEL_SDP3,
+	};
+	u32 aux1_sel_sdp[IGB_N_SDP] = {
+		AUX1_SEL_SDP0, AUX1_SEL_SDP1, AUX1_SEL_SDP2, AUX1_SEL_SDP3,
+	};
+	u32 ts_sdp_en[IGB_N_SDP] = {
+		TS_SDP0_EN, TS_SDP1_EN, TS_SDP2_EN, TS_SDP3_EN,
+	};
+	u32 ts_sdp_sel_tt0[IGB_N_SDP] = {
+		TS_SDP0_SEL_TT0, TS_SDP1_SEL_TT0,
+		TS_SDP2_SEL_TT0, TS_SDP3_SEL_TT0,
+	};
+	u32 ts_sdp_sel_tt1[IGB_N_SDP] = {
+		TS_SDP0_SEL_TT1, TS_SDP1_SEL_TT1,
+		TS_SDP2_SEL_TT1, TS_SDP3_SEL_TT1,
+	};
+	u32 ts_sdp_sel_clr[IGB_N_SDP] = {
+		TS_SDP0_SEL_FC1, TS_SDP1_SEL_FC1,
+		TS_SDP2_SEL_FC1, TS_SDP3_SEL_FC1,
+	};
+	u32 ctrl, ctrl_ext, tssdp = 0;
+
+	ctrl = rd32(E1000_CTRL);
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	tssdp = rd32(E1000_TSSDP);
+
+	igb_pin_direction(pin, 0, &ctrl, &ctrl_ext);
+
+	/* Make sure this pin is not enabled as an input. */
+	if ((tssdp & AUX0_SEL_SDP3) == aux0_sel_sdp[pin])
+		tssdp &= ~AUX0_TS_SDP_EN;
+
+	if ((tssdp & AUX1_SEL_SDP3) == aux1_sel_sdp[pin])
+		tssdp &= ~AUX1_TS_SDP_EN;
+
+	tssdp &= ~ts_sdp_sel_clr[pin];
+	if (chan == 1)
+		tssdp |= ts_sdp_sel_tt1[pin];
+	else
+		tssdp |= ts_sdp_sel_tt0[pin];
+	tssdp |= ts_sdp_en[pin];
+
+	wr32(E1000_TSSDP, tssdp);
+	wr32(E1000_CTRL, ctrl);
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
+				       struct ptp_clock_request *rq, int on)
+{
+	struct igb_adapter *igb =
+		container_of(ptp, struct igb_adapter, ptp_caps);
+	struct e1000_hw *hw = &igb->hw;
+	u32 tsauxc, tsim, tsauxc_mask, tsim_mask, trgttiml, trgttimh;
+	unsigned long flags;
+	struct timespec ts;
+	int pin;
+	s64 ns;
+
+	switch (rq->type) {
+	case PTP_CLK_REQ_EXTTS:
+		if (on) {
+			pin = ptp_find_pin(igb->ptp_clock, PTP_PF_EXTTS,
+					   rq->extts.index);
+			if (pin < 0)
+				return -EBUSY;
+		}
+		if (rq->extts.index == 1) {
+			tsauxc_mask = TSAUXC_EN_TS1;
+			tsim_mask = TSINTR_AUTT1;
+		} else {
+			tsauxc_mask = TSAUXC_EN_TS0;
+			tsim_mask = TSINTR_AUTT0;
+		}
+		spin_lock_irqsave(&igb->tmreg_lock, flags);
+		tsauxc = rd32(E1000_TSAUXC);
+		tsim = rd32(E1000_TSIM);
+		if (on) {
+			igb_pin_extts(igb, rq->extts.index, pin);
+			tsauxc |= tsauxc_mask;
+			tsim |= tsim_mask;
+		} else {
+			tsauxc &= ~tsauxc_mask;
+			tsim &= ~tsim_mask;
+		}
+		wr32(E1000_TSAUXC, tsauxc);
+		wr32(E1000_TSIM, tsim);
+		spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+		return 0;
+
+	case PTP_CLK_REQ_PEROUT:
+		if (on) {
+			pin = ptp_find_pin(igb->ptp_clock, PTP_PF_PEROUT,
+					   rq->perout.index);
+			if (pin < 0)
+				return -EBUSY;
+		}
+		ts.tv_sec = rq->perout.period.sec;
+		ts.tv_nsec = rq->perout.period.nsec;
+		ns = timespec_to_ns(&ts);
+		ns = ns >> 1;
+		if (on && ns < 500000LL) {
+			/* 2k interrupts per second is an awful lot. */
+			return -EINVAL;
+		}
+		ts = ns_to_timespec(ns);
+		if (rq->perout.index == 1) {
+			tsauxc_mask = TSAUXC_EN_TT1;
+			tsim_mask = TSINTR_TT1;
+			trgttiml = E1000_TRGTTIML1;
+			trgttimh = E1000_TRGTTIMH1;
+		} else {
+			tsauxc_mask = TSAUXC_EN_TT0;
+			tsim_mask = TSINTR_TT0;
+			trgttiml = E1000_TRGTTIML0;
+			trgttimh = E1000_TRGTTIMH0;
+		}
+		spin_lock_irqsave(&igb->tmreg_lock, flags);
+		tsauxc = rd32(E1000_TSAUXC);
+		tsim = rd32(E1000_TSIM);
+		if (on) {
+			int i = rq->perout.index;
+
+			igb_pin_perout(igb, i, pin);
+			igb->perout[i].start.tv_sec = rq->perout.start.sec;
+			igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
+			igb->perout[i].period.tv_sec = ts.tv_sec;
+			igb->perout[i].period.tv_nsec = ts.tv_nsec;
+			wr32(trgttiml, rq->perout.start.sec);
+			wr32(trgttimh, rq->perout.start.nsec);
+			tsauxc |= tsauxc_mask;
+			tsim |= tsim_mask;
+		} else {
+			tsauxc &= ~tsauxc_mask;
+			tsim &= ~tsim_mask;
+		}
+		wr32(E1000_TSAUXC, tsauxc);
+		wr32(E1000_TSIM, tsim);
+		spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+		return 0;
+
+	case PTP_CLK_REQ_PPS:
+		spin_lock_irqsave(&igb->tmreg_lock, flags);
+		tsim = rd32(E1000_TSIM);
+		if (on)
+			tsim |= TSINTR_SYS_WRAP;
+		else
+			tsim &= ~TSINTR_SYS_WRAP;
+		wr32(E1000_TSIM, tsim);
+		spin_unlock_irqrestore(&igb->tmreg_lock, flags);
+		return 0;
+	}
+
+	return -EOPNOTSUPP;
+}
+
 static int igb_ptp_feature_enable(struct ptp_clock_info *ptp,
 				  struct ptp_clock_request *rq, int on)
 {
 	return -EOPNOTSUPP;
 }
+
+static int igb_ptp_verify_pin(struct ptp_clock_info *ptp, unsigned int pin,
+			      enum ptp_pin_function func, unsigned int chan)
+{
+	switch (func) {
+	case PTP_PF_NONE:
+	case PTP_PF_EXTTS:
+	case PTP_PF_PEROUT:
+		break;
+	case PTP_PF_PHYSYNC:
+		return -1;
+	}
+	return 0;
+}
+
 /**
  * igb_ptp_tx_work
  * @work: pointer to work struct
@@ -751,6 +978,7 @@ void igb_ptp_init(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
+	int i;
 	switch (hw->mac.type) {
 	case e1000_82576:
@@ -793,16 +1021,27 @@ void igb_ptp_init(struct igb_adapter *adapter)
 		break;
 	case e1000_i210:
 	case e1000_i211:
+		for (i = 0; i < IGB_N_SDP; i++) {
+			struct ptp_pin_desc *ppd = &adapter->sdp_config[i];
+
+			snprintf(ppd->name, sizeof(ppd->name), "SDP%d", i);
+			ppd->index = i;
+			ppd->func = PTP_PF_NONE;
+		}
 		snprintf(adapter->ptp_caps.name, 16, "%pm", netdev->dev_addr);
 		adapter->ptp_caps.owner = THIS_MODULE;
 		adapter->ptp_caps.max_adj = 62499999;
-		adapter->ptp_caps.n_ext_ts = 0;
-		adapter->ptp_caps.pps = 0;
+		adapter->ptp_caps.n_ext_ts = IGB_N_EXTTS;
+		adapter->ptp_caps.n_per_out = IGB_N_PEROUT;
+		adapter->ptp_caps.n_pins = IGB_N_SDP;
+		adapter->ptp_caps.pps = 1;
+		adapter->ptp_caps.pin_config = adapter->sdp_config;
 		adapter->ptp_caps.adjfreq = igb_ptp_adjfreq_82580;
 		adapter->ptp_caps.adjtime = igb_ptp_adjtime_i210;
 		adapter->ptp_caps.gettime = igb_ptp_gettime_i210;
 		adapter->ptp_caps.settime = igb_ptp_settime_i210;
-		adapter->ptp_caps.enable = igb_ptp_feature_enable;
+		adapter->ptp_caps.enable = igb_ptp_feature_enable_i210;
+		adapter->ptp_caps.verify = igb_ptp_verify_pin;
 		/* Enable the timer functions by clearing bit 31. */
 		wr32(E1000_TSAUXC, 0x0);
 		break;
@@ -900,6 +1139,7 @@ void igb_ptp_stop(struct igb_adapter *adapter)
 void igb_ptp_reset(struct igb_adapter *adapter)
 {
 	struct e1000_hw *hw = &adapter->hw;
+	unsigned long flags;
 	if (!(adapter->flags & IGB_FLAG_PTP))
 		return;
@@ -907,6 +1147,8 @@ void igb_ptp_reset(struct igb_adapter *adapter)
 	/* reset the tstamp_config */
 	igb_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config);
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+
 	switch (adapter->hw.mac.type) {
 	case e1000_82576:
 		/* Dial the nominal frequency. */
@@ -917,23 +1159,25 @@ void igb_ptp_reset(struct igb_adapter *adapter)
 	case e1000_i350:
 	case e1000_i210:
 	case e1000_i211:
+		/* Enable the timer functions and interrupts. */
 		wr32(E1000_TSAUXC, 0x0);
+		wr32(E1000_TSSDP, 0x0);
 		wr32(E1000_TSIM, TSYNC_INTERRUPTS);
 		wr32(E1000_IMS, E1000_IMS_TS);
 		break;
 	default:
 		/* No work to do. */
-		return;
+		goto out;
 	}
 	/* Re-initialize the timer. */
 	if ((hw->mac.type == e1000_i210) || (hw->mac.type == e1000_i211)) {
 		struct timespec ts = ktime_to_timespec(ktime_get_real());
-		igb_ptp_settime_i210(&adapter->ptp_caps, &ts);
+		igb_ptp_write_i210(adapter, &ts);
 	} else {
 		timecounter_init(&adapter->tc, &adapter->cc,
 				 ktime_to_ns(ktime_get_real()));
 	}
+out:
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
 }
drivers/net/virtio_net.c
@@ -925,6 +925,9 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
 	/* Free up any pending old buffers before queueing new ones. */
 	free_old_xmit_skbs(sq);
+	/* timestamp packet in software */
+	skb_tx_timestamp(skb);
+
 	/* Try to transmit */
 	err = xmit_skb(sq, skb);
@@ -1376,6 +1379,7 @@ static const struct ethtool_ops virtnet_ethtool_ops = {
 	.get_ringparam = virtnet_get_ringparam,
 	.set_channels = virtnet_set_channels,
 	.get_channels = virtnet_get_channels,
+	.get_ts_info = ethtool_op_get_ts_info,
 };
 #define MIN_MTU 68
...