Commit 0e4767aa authored by David S. Miller

Merge branch 'sfc'

Shradha Shah says:

====================
Cleanup patches for the SFC driver

This patch set consists of some cleanup and housekeeping
patches for the sfc driver.
These patches help to reduce the differences between the in-
tree and out-of-tree driver.

Ben Hutchings (12):
  sfc: Cache skb->data in local variable in efx_ptp_rx()
  sfc: Rewrite adjustment of PPS event in a clearer way
  sfc: Replace TSOH_OFFSET with the equivalent NET_IP_ALIGN
  sfc: Rename 'use_options' variable in tso_start() to clearer
    'use_opt_desc'
  sfc: Remove unused definitions of EF10 user-mode DMA descriptors
  sfc: Correct comment about number of TX queues used on EF10
  sfc: Preserve rx_frm_trunc counters when resizing DMA rings
  sfc: Use canonical pointer type for MAC address in
    efx_set_mac_address()
  sfc: Update product naming
  sfc: Cosmetic changes to self-test from the out-of-tree driver
  sfc: Fail self-test with -EBUSY, not -EIO, if the device is busy
  sfc: Add/remove blank lines to taste

Laurence Evans (1):
  sfc: Removed adhoc scheme to rate limit PTP event queue overflow
    message
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents a146b591 5b3b7608
@@ -172,8 +172,8 @@ static int efx_ef10_probe(struct efx_nic *efx)
struct efx_ef10_nic_data *nic_data;
int i, rc;
-/* We can have one VI for each 8K region. However we need
- * multiple TX queues per channel.
+/* We can have one VI for each 8K region. However, until we
+ * use TX option descriptors we need two TX queues per channel.
*/
efx->max_channels =
min_t(unsigned int,
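The corrected comment pins down the arithmetic: each 8K region of the BAR hosts one VI, and since the driver still posts two TX queues per channel (until TX option descriptors are used), only half the VIs can become channels. A minimal sketch of that bound, using assumed placeholder sizes and names rather than the driver's real probe values:

#include <stdio.h>

#define VI_REGION_SIZE  8192 /* one VI per 8K BAR region */
#define TXQ_PER_CHANNEL 2    /* two TX queues until option descriptors are used */

/* Hypothetical helper: the channel count is bounded by VIs divided by
 * TX queues per channel, and by whatever other limit applies.
 */
static unsigned int max_channels(unsigned int vi_window, unsigned int other_limit)
{
	unsigned int by_vis = (vi_window / VI_REGION_SIZE) / TXQ_PER_CHANNEL;

	return by_vis < other_limit ? by_vis : other_limit;
}

int main(void)
{
	/* an assumed 1 MiB VI window gives 128 VIs, so at most 64 channels,
	 * further capped here by an assumed limit of 32
	 */
	printf("max channels: %u\n", max_channels(1024 * 1024, 32));
	return 0;
}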
@@ -227,36 +227,6 @@
#define ESF_DZ_RX_KER_BUF_ADDR_LBN 0
#define ESF_DZ_RX_KER_BUF_ADDR_WIDTH 48
-/* RX_USER_DESC */
-#define ESF_DZ_RX_USR_RESERVED_LBN 62
-#define ESF_DZ_RX_USR_RESERVED_WIDTH 2
-#define ESF_DZ_RX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_RX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_RX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_RX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_RX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_RX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_RX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_RX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
/* TX_CSUM_TSTAMP_DESC */
#define ESF_DZ_TX_DESC_IS_OPT_LBN 63
#define ESF_DZ_TX_DESC_IS_OPT_WIDTH 1
@@ -338,37 +308,6 @@
#define ESF_DZ_TX_TSO_TCP_SEQNO_LBN 0
#define ESF_DZ_TX_TSO_TCP_SEQNO_WIDTH 32
-/* TX_USER_DESC */
-#define ESF_DZ_TX_USR_TYPE_LBN 63
-#define ESF_DZ_TX_USR_TYPE_WIDTH 1
-#define ESF_DZ_TX_USR_CONT_LBN 62
-#define ESF_DZ_TX_USR_CONT_WIDTH 1
-#define ESF_DZ_TX_USR_BYTE_CNT_LBN 48
-#define ESF_DZ_TX_USR_BYTE_CNT_WIDTH 14
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_LBN 44
-#define ESF_DZ_TX_USR_BUF_PAGE_SIZE_WIDTH 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4MB 10
-#define ESE_DZ_USR_BUF_PAGE_SZ_1MB 8
-#define ESE_DZ_USR_BUF_PAGE_SZ_64KB 4
-#define ESE_DZ_USR_BUF_PAGE_SZ_4KB 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_BUF_ID_OFFSET_WIDTH 44
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_LBN 12
-#define ESF_DZ_TX_USR_4KBPS_BUF_ID_WIDTH 32
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_LBN 16
-#define ESF_DZ_TX_USR_64KBPS_BUF_ID_WIDTH 28
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_LBN 20
-#define ESF_DZ_TX_USR_1MBPS_BUF_ID_WIDTH 24
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_LBN 22
-#define ESF_DZ_TX_USR_4MBPS_BUF_ID_WIDTH 22
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4MBPS_BYTE_OFFSET_WIDTH 22
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_1MBPS_BYTE_OFFSET_WIDTH 20
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_64KBPS_BYTE_OFFSET_WIDTH 16
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_LBN 0
-#define ESF_DZ_TX_USR_4KBPS_BYTE_OFFSET_WIDTH 12
/*************************************************************************/
/* TX_DESC_UPD_REG: Transmit descriptor update register.
@@ -503,8 +503,6 @@ static int efx_probe_channel(struct efx_channel *channel)
goto fail;
}
-channel->n_rx_frm_trunc = 0;
return 0;
fail:
@@ -2115,7 +2113,7 @@ static int efx_set_mac_address(struct net_device *net_dev, void *data)
{
struct efx_nic *efx = netdev_priv(net_dev);
struct sockaddr *addr = data;
-char *new_addr = addr->sa_data;
+u8 *new_addr = addr->sa_data;
if (!is_valid_ether_addr(new_addr)) {
netif_err(efx, drv, efx->net_dev,
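Switching to the kernel's canonical u8 * type for MAC addresses matches what is_valid_ether_addr() takes. As a rough userspace restatement of the check that guards the assignment (not the kernel's implementation), an address is acceptable only if it is neither multicast nor all zeroes:

#include <stdbool.h>
#include <stdint.h>
#include <string.h>

/* Simplified restatement of is_valid_ether_addr(): reject multicast
 * addresses (bit 0 of the first octet set) and the all-zero address.
 */
static bool mac_is_valid(const uint8_t addr[6])
{
	static const uint8_t zero[6];

	return !(addr[0] & 0x01) && memcmp(addr, zero, 6) != 0;
}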
@@ -3273,6 +3271,6 @@ module_exit(efx_exit_module);
MODULE_AUTHOR("Solarflare Communications and "
"Michael Brown <mbrown@fensystems.co.uk>");
MODULE_DESCRIPTION("Solarflare Communications network driver");
MODULE_DESCRIPTION("Solarflare network driver");
MODULE_LICENSE("GPL");
MODULE_DEVICE_TABLE(pci, efx_pci_table);
@@ -14,7 +14,7 @@
#include "net_driver.h"
#include "filter.h"
-/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
+/* All controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
#define EFX_MEM_BAR 2
/* TX */
@@ -251,6 +251,9 @@ static void efx_fill_test(unsigned int test_index, u8 *strings, u64 *data,
* @test_index: Starting index of the test
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Fill in a block of loopback self-test entries. Return new test
+ * index.
*/
static int efx_fill_loopback_test(struct efx_nic *efx,
struct efx_loopback_self_tests *lb_tests,
@@ -290,6 +293,12 @@ static int efx_fill_loopback_test(struct efx_nic *efx,
* @tests: Efx self-test results structure, or %NULL
* @strings: Ethtool strings, or %NULL
* @data: Ethtool test results, or %NULL
+ *
+ * Get self-test number of strings, strings, and/or test results.
+ * Return number of strings (== number of test results).
+ *
+ * The reason for merging these three functions is to make sure that
+ * they can never be inconsistent.
*/
static int efx_ethtool_fill_self_tests(struct efx_nic *efx,
struct efx_self_tests *tests,
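The added kernel-doc spells out the design choice: one routine acts as string counter, string filler, and result filler depending on which output pointers are non-NULL, so the three views can never disagree. A distilled sketch of that pattern, with hypothetical names and a flattened signature rather than the driver's actual one:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define NAME_LEN 32

/* One function answers all three queries: how many tests there are,
 * their names, and their results. Callers pass NULL for the outputs
 * they do not need, so the count cannot drift out of sync with the
 * filled arrays.
 */
static int fill_self_tests(char (*strings)[NAME_LEN], int64_t *data)
{
	static const char *names[] = { "phy alive", "registers", "interrupt" };
	static const int64_t results[] = { 1, 1, 0 };
	size_t i, n = sizeof(names) / sizeof(names[0]);

	for (i = 0; i < n; i++) {
		if (strings)
			snprintf(strings[i], NAME_LEN, "%s", names[i]);
		if (data)
			data[i] = results[i];
	}
	return (int)n;
}

int main(void)
{
	int n = fill_self_tests(NULL, NULL); /* count only */

	printf("%d tests\n", n);
	return 0;
}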
@@ -444,7 +453,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
{
struct efx_nic *efx = netdev_priv(net_dev);
struct efx_self_tests *efx_tests;
-int already_up;
+bool already_up;
int rc = -ENOMEM;
efx_tests = kzalloc(sizeof(*efx_tests), GFP_KERNEL);
@@ -452,8 +461,8 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
goto fail;
if (efx->state != STATE_READY) {
-rc = -EIO;
-goto fail1;
+rc = -EBUSY;
+goto out;
}
netif_info(efx, drv, efx->net_dev, "starting %sline testing\n",
@@ -466,7 +475,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
if (rc) {
netif_err(efx, drv, efx->net_dev,
"failed opening device.\n");
-goto fail1;
+goto out;
}
}
@@ -479,8 +488,7 @@ static void efx_ethtool_self_test(struct net_device *net_dev,
rc == 0 ? "passed" : "failed",
(test->flags & ETH_TEST_FL_OFFLINE) ? "off" : "on");
-fail1:
-/* Fill ethtool results structures */
+out:
efx_ethtool_fill_self_tests(efx, efx_tests, NULL, data);
kfree(efx_tests);
fail:
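Failing with -EBUSY when the device is merely not in the READY state tells userspace to retry rather than report a hardware fault, and the single out: label keeps the cleanup path unified. A compressed sketch of that error-path shape, with placeholder state values rather than the driver's:

#include <errno.h>
#include <stdlib.h>

enum state { STATE_READY, STATE_DISABLED };

/* Hypothetical self-test skeleton: -EBUSY for a transient state,
 * one exit label for both success and failure cleanup.
 */
static int self_test(enum state s)
{
	int rc = -ENOMEM;
	void *results = malloc(64); /* stands in for the results structure */

	if (!results)
		return rc;

	if (s != STATE_READY) {
		rc = -EBUSY; /* transient condition: caller may retry */
		goto out;
	}
	rc = 0; /* the tests would run here */
out:
	free(results);
	return rc;
}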
@@ -691,7 +699,6 @@ static void efx_ethtool_get_pauseparam(struct net_device *net_dev,
pause->autoneg = !!(efx->wanted_fc & EFX_FC_AUTO);
}
static void efx_ethtool_get_wol(struct net_device *net_dev,
struct ethtool_wolinfo *wol)
{
@@ -422,7 +422,6 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
}
static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
{
struct efx_nic *efx = dev_id;
@@ -467,6 +466,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
efx_schedule_channel_irq(efx_get_channel(efx, 1));
return IRQ_HANDLED;
}
/**************************************************************************
*
* RSS
@@ -1358,6 +1358,7 @@ static void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
case 100: link_speed = 1; break;
default: link_speed = 0; break;
}
/* MAC_LINK_STATUS controls MAC backpressure but doesn't work
* as advertised. Disable to ensure packets are not
* indefinitely held and TX queue can be flushed at any point
@@ -2868,4 +2869,3 @@ const struct efx_nic_type falcon_b0_nic_type = {
.mcdi_max_ver = -1,
.max_rx_ip_filters = FR_BZ_RX_FILTER_TBL0_ROWS,
};
@@ -311,7 +311,6 @@ static inline void efx_farch_push_tx_desc(struct efx_tx_queue *tx_queue,
*/
void efx_farch_tx_write(struct efx_tx_queue *tx_queue)
{
struct efx_tx_buffer *buffer;
efx_qword_t *txd;
unsigned write_ptr;
@@ -1609,7 +1608,6 @@ irqreturn_t efx_farch_msi_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
/* Setup RSS indirection table.
* This maps from the hash value of the packet to RXQ
*/
@@ -1323,7 +1323,6 @@ static inline struct efx_rx_buffer *efx_rx_buffer(struct efx_rx_queue *rx_queue,
return &rx_queue->buffer[index];
}
/**
* EFX_MAX_FRAME_LEN - calculate maximum frame length
*
@@ -530,4 +530,3 @@ void efx_nic_fix_nodesc_drop_stat(struct efx_nic *efx, u64 *rx_nodesc_drops)
efx->rx_nodesc_drops_prev_state = !!(efx->net_dev->flags & IFF_UP);
*rx_nodesc_drops -= efx->rx_nodesc_drops_while_down;
}
@@ -223,7 +223,6 @@ struct efx_ptp_timeset {
* @evt_list: List of MC receive events awaiting packets
* @evt_free_list: List of free events
* @evt_lock: Lock for manipulating evt_list and evt_free_list
- * @evt_overflow: Boolean indicating that event list has overflowed
* @rx_evts: Instantiated events (on evt_list and evt_free_list)
* @workwq: Work queue for processing pending PTP operations
* @work: Work task
@@ -275,7 +274,6 @@ struct efx_ptp_data {
struct list_head evt_list;
struct list_head evt_free_list;
spinlock_t evt_lock;
-bool evt_overflow;
struct efx_ptp_event_rx rx_evts[MAX_RECEIVE_EVENTS];
struct workqueue_struct *workwq;
struct work_struct work;
@@ -768,37 +766,36 @@ efx_ptp_process_times(struct efx_nic *efx, MCDI_DECLARE_STRUCT_PTR(synch_buf),
return -EAGAIN;
}
-/* Convert the NIC time into kernel time. No correction is required-
- * this time is the output of a firmware process.
- */
-mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
-ptp->timeset[last_good].minor, 0);
-/* Calculate delay from actual PPS to last_time */
-delta = ktime_to_timespec(mc_time);
-delta.tv_nsec +=
-last_time->ts_real.tv_nsec -
-(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
-/* It is possible that the seconds rolled over between taking
+/* Calculate delay from last good sync (host time) to last_time.
+ * It is possible that the seconds rolled over between taking
* the start reading and the last value written by the host. The
* timescales are such that a gap of more than one second is never
- * expected.
+ * expected. delta is *not* normalised.
*/
start_sec = ptp->timeset[last_good].host_start >> MC_NANOSECOND_BITS;
last_sec = last_time->ts_real.tv_sec & MC_SECOND_MASK;
-if (start_sec != last_sec) {
-if (((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
-netif_warn(efx, hw, efx->net_dev,
-"PTP bad synchronisation seconds\n");
-return -EAGAIN;
-} else {
-delta.tv_sec = 1;
-}
-} else {
-delta.tv_sec = 0;
-}
+if (start_sec != last_sec &&
+((start_sec + 1) & MC_SECOND_MASK) != last_sec) {
+netif_warn(efx, hw, efx->net_dev,
+"PTP bad synchronisation seconds\n");
+return -EAGAIN;
+}
+delta.tv_sec = (last_sec - start_sec) & 1;
+delta.tv_nsec =
+last_time->ts_real.tv_nsec -
+(ptp->timeset[last_good].host_start & MC_NANOSECOND_MASK);
+/* Convert the NIC time at last good sync into kernel time.
+ * No correction is required - this time is the output of a
+ * firmware process.
+ */
+mc_time = ptp->nic_to_kernel_time(ptp->timeset[last_good].major,
+ptp->timeset[last_good].minor, 0);
+/* Calculate delay from NIC top of second to last_time */
+delta.tv_nsec += ktime_to_timespec(mc_time).tv_nsec;
/* Set PPS timestamp to match NIC top of second */
ptp->host_time_pps = *last_time;
pps_sub_ts(&ptp->host_time_pps, delta);
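The rewritten logic leans on the guarantee that at most one second elapses between the two readings: after the sanity check, last_sec is either start_sec or its masked successor, so (last_sec - start_sec) & 1 yields exactly the 0-or-1 second carry, including across the mask's wrap point. A standalone demonstration of that identity, with an assumed 8-bit mask standing in for MC_SECOND_MASK:

#include <assert.h>
#include <stdio.h>

#define SECOND_MASK 0xff /* assumed width, for illustration only */

int main(void)
{
	unsigned int start_sec, last_sec, delta_sec;

	/* no rollover: same masked second on both sides */
	start_sec = 0x41;
	last_sec = 0x41;
	delta_sec = (last_sec - start_sec) & 1;
	assert(delta_sec == 0);

	/* rollover across the mask boundary: 0xff -> 0x00 */
	start_sec = 0xff;
	last_sec = (start_sec + 1) & SECOND_MASK;
	delta_sec = (last_sec - start_sec) & 1; /* unsigned wrap is odd -> 1 */
	assert(delta_sec == 1);

	printf("second carry handled in both cases\n");
	return 0;
}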
@@ -941,11 +938,6 @@ static void efx_ptp_drop_time_expired_events(struct efx_nic *efx)
}
}
}
-/* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
-if (ptp->evt_overflow && list_empty(&ptp->evt_list))
-ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
}
@@ -989,11 +981,6 @@ static enum ptp_packet_state efx_ptp_match_rx(struct efx_nic *efx,
break;
}
}
-/* If the event overflow flag is set and the event list is now empty
- * clear the flag to re-enable the overflow warning message.
- */
-if (ptp->evt_overflow && list_empty(&ptp->evt_list))
-ptp->evt_overflow = false;
spin_unlock_bh(&ptp->evt_lock);
return rc;
@@ -1147,7 +1134,6 @@ static int efx_ptp_stop(struct efx_nic *efx)
list_for_each_safe(cursor, next, &efx->ptp_data->evt_list) {
list_move(cursor, &efx->ptp_data->evt_free_list);
}
-ptp->evt_overflow = false;
spin_unlock_bh(&efx->ptp_data->evt_lock);
return rc;
@@ -1253,7 +1239,6 @@ int efx_ptp_probe(struct efx_nic *efx, struct efx_channel *channel)
spin_lock_init(&ptp->evt_lock);
for (pos = 0; pos < MAX_RECEIVE_EVENTS; pos++)
list_add(&ptp->rx_evts[pos].link, &ptp->evt_free_list);
-ptp->evt_overflow = false;
/* Get the NIC PTP attributes and set up time conversions */
rc = efx_ptp_get_attributes(efx);
@@ -1380,6 +1365,7 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
struct efx_ptp_match *match = (struct efx_ptp_match *)skb->cb;
u8 *match_data_012, *match_data_345;
unsigned int version;
+u8 *data;
match->expiry = jiffies + msecs_to_jiffies(PKT_EVENT_LIFETIME_MS);
@@ -1388,7 +1374,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
if (!pskb_may_pull(skb, PTP_V1_MIN_LENGTH)) {
return false;
}
-version = ntohs(*(__be16 *)&skb->data[PTP_V1_VERSION_OFFSET]);
+data = skb->data;
+version = ntohs(*(__be16 *)&data[PTP_V1_VERSION_OFFSET]);
if (version != PTP_VERSION_V1) {
return false;
}
@@ -1396,13 +1383,14 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
/* PTP V1 uses all six bytes of the UUID to match the packet
* to the timestamp
*/
-match_data_012 = skb->data + PTP_V1_UUID_OFFSET;
-match_data_345 = skb->data + PTP_V1_UUID_OFFSET + 3;
+match_data_012 = data + PTP_V1_UUID_OFFSET;
+match_data_345 = data + PTP_V1_UUID_OFFSET + 3;
} else {
if (!pskb_may_pull(skb, PTP_V2_MIN_LENGTH)) {
return false;
}
-version = skb->data[PTP_V2_VERSION_OFFSET];
+data = skb->data;
+version = data[PTP_V2_VERSION_OFFSET];
if ((version & PTP_VERSION_V2_MASK) != PTP_VERSION_V2) {
return false;
}
@@ -1414,17 +1402,17 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
* enhanced mode fixes this issue and uses bytes 0-2
* and byte 5-7 of the UUID.
*/
-match_data_345 = skb->data + PTP_V2_UUID_OFFSET + 5;
+match_data_345 = data + PTP_V2_UUID_OFFSET + 5;
if (ptp->mode == MC_CMD_PTP_MODE_V2) {
-match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 2;
+match_data_012 = data + PTP_V2_UUID_OFFSET + 2;
} else {
-match_data_012 = skb->data + PTP_V2_UUID_OFFSET + 0;
+match_data_012 = data + PTP_V2_UUID_OFFSET + 0;
BUG_ON(ptp->mode != MC_CMD_PTP_MODE_V2_ENHANCED);
}
}
/* Does this packet require timestamping? */
-if (ntohs(*(__be16 *)&skb->data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
+if (ntohs(*(__be16 *)&data[PTP_DPORT_OFFSET]) == PTP_EVENT_PORT) {
match->state = PTP_PACKET_STATE_UNMATCHED;
/* We expect the sequence number to be in the same position in
@@ -1440,8 +1428,8 @@ static bool efx_ptp_rx(struct efx_channel *channel, struct sk_buff *skb)
(match_data_345[0] << 24));
match->words[1] = (match_data_345[1] |
(match_data_345[2] << 8) |
-(skb->data[PTP_V1_SEQUENCE_OFFSET +
-PTP_V1_SEQUENCE_LENGTH - 1] <<
+(data[PTP_V1_SEQUENCE_OFFSET +
+PTP_V1_SEQUENCE_LENGTH - 1] <<
16));
} else {
match->state = PTP_PACKET_STATE_MATCH_UNWANTED;
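Caching skb->data in a local variable is safe in efx_ptp_rx() only because the snapshot is (re)taken after each pskb_may_pull() call: a successful pull can reallocate the header and move skb->data, so a pointer taken before the pull could dangle. A self-contained userspace analogue of that hazard, with a hypothetical may_pull() loosely modelled on pskb_may_pull() (the real function linearizes header data rather than extending the buffer):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *data;
	size_t len;
};

/* Stand-in for pskb_may_pull(): guarantees len bytes are directly
 * readable, possibly by reallocating (and therefore moving) the data.
 */
static int may_pull(struct buf *b, size_t len)
{
	if (len > b->len) {
		unsigned char *p = realloc(b->data, len);

		if (!p)
			return 0;
		memset(p + b->len, 0, len - b->len);
		b->data = p; /* the base pointer may change here */
		b->len = len;
	}
	return 1;
}

int main(void)
{
	struct buf b = { calloc(4, 1), 4 };
	unsigned char *data;

	if (!may_pull(&b, 64)) {
		free(b.data);
		return 1;
	}
	data = b.data; /* cache only after the pull, as efx_ptp_rx() does */
	printf("first byte: %u\n", data[0]);
	free(b.data);
	return 0;
}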
@@ -1635,13 +1623,9 @@ static void ptp_event_rx(struct efx_nic *efx, struct efx_ptp_data *ptp)
list_add_tail(&evt->link, &ptp->evt_list);
queue_work(ptp->workwq, &ptp->work);
-} else if (!ptp->evt_overflow) {
-/* Log a warning message and set the event overflow flag.
- * The message won't be logged again until the event queue
- * becomes empty.
- */
+} else if (net_ratelimit()) {
+/* Log a rate-limited warning message. */
netif_err(efx, rx_err, efx->net_dev, "PTP event queue overflow\n");
-ptp->evt_overflow = true;
}
spin_unlock_bh(&ptp->evt_lock);
}
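Replacing the hand-rolled evt_overflow latch with net_ratelimit() drops the per-device state entirely: the kernel's shared ratelimit gate simply returns false when messages arrive too fast. The idiom is just a guard around the log call; a sketch of the same shape with a crude stand-in for the gate (the real net_ratelimit() is burst-aware and shared across the networking stack):

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* Crude stand-in for net_ratelimit(): allow at most one message per
 * second. This only illustrates the calling pattern.
 */
static bool rate_limited_ok(void)
{
	static time_t last;
	time_t now = time(NULL);

	if (now == last)
		return false;
	last = now;
	return true;
}

static void report_overflow(void)
{
	if (rate_limited_ok())
		fprintf(stderr, "PTP event queue overflow\n");
}

int main(void)
{
	for (int i = 0; i < 5; i++)
		report_overflow(); /* only the first prints this second */
	return 0;
}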
@@ -787,15 +787,6 @@ void efx_remove_tx_queue(struct efx_tx_queue *tx_queue)
* Requires TX checksum offload support.
*/
-/* Number of bytes inserted at the start of a TSO header buffer,
- * similar to NET_IP_ALIGN.
- */
-#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
-#define TSOH_OFFSET 0
-#else
-#define TSOH_OFFSET NET_IP_ALIGN
-#endif
#define PTR_DIFF(p1, p2) ((u8 *)(p1) - (u8 *)(p2))
/**
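TSOH_OFFSET duplicated a distinction the kernel already makes: NET_IP_ALIGN is overridden to 0 by architectures with cheap unaligned access (such as x86) and defaults to 2 everywhere else, so the driver can use it directly. For reference, the generic fallback in the kernel headers is essentially:

/* Architectures with efficient unaligned loads (e.g. x86) define
 * NET_IP_ALIGN as 0 in their own headers; the generic default of 2
 * keeps the IP header 4-byte aligned after the 14-byte Ethernet header.
 */
#ifndef NET_IP_ALIGN
#define NET_IP_ALIGN 2
#endif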
@@ -882,13 +873,13 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
EFX_BUG_ON_PARANOID(buffer->flags);
EFX_BUG_ON_PARANOID(buffer->unmap_len);
-if (likely(len <= TSOH_STD_SIZE - TSOH_OFFSET)) {
+if (likely(len <= TSOH_STD_SIZE - NET_IP_ALIGN)) {
unsigned index =
(tx_queue->insert_count & tx_queue->ptr_mask) / 2;
struct efx_buffer *page_buf =
&tx_queue->tsoh_page[index / TSOH_PER_PAGE];
unsigned offset =
-TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + TSOH_OFFSET;
+TSOH_STD_SIZE * (index % TSOH_PER_PAGE) + NET_IP_ALIGN;
if (unlikely(!page_buf->addr) &&
efx_nic_alloc_buffer(tx_queue->efx, page_buf, PAGE_SIZE,
@@ -901,10 +892,10 @@ static u8 *efx_tsoh_get_buffer(struct efx_tx_queue *tx_queue,
} else {
tx_queue->tso_long_headers++;
-buffer->heap_buf = kmalloc(TSOH_OFFSET + len, GFP_ATOMIC);
+buffer->heap_buf = kmalloc(NET_IP_ALIGN + len, GFP_ATOMIC);
if (unlikely(!buffer->heap_buf))
return NULL;
-result = (u8 *)buffer->heap_buf + TSOH_OFFSET;
+result = (u8 *)buffer->heap_buf + NET_IP_ALIGN;
buffer->flags = EFX_TX_BUF_CONT | EFX_TX_BUF_HEAP;
}
@@ -1011,7 +1002,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
static int tso_start(struct tso_state *st, struct efx_nic *efx,
const struct sk_buff *skb)
{
-bool use_options = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
+bool use_opt_desc = efx_nic_rev(efx) >= EFX_REV_HUNT_A0;
struct device *dma_dev = &efx->pci_dev->dev;
unsigned int header_len, in_len;
dma_addr_t dma_addr;
@@ -1037,7 +1028,7 @@ static int tso_start(struct tso_state *st, struct efx_nic *efx,
st->out_len = skb->len - header_len;
-if (!use_options) {
+if (!use_opt_desc) {
st->header_unmap_len = 0;
if (likely(in_len == 0)) {