Commit efad0c14 authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to ioat (DCA) and ixgbevf.
 ...
Alexander Duyck (1):
  ioat: Do not enable DCA if tag map is invalid

Greg Rose (8):
  ixgbevf: Streamline the rx buffer allocation
  ixgbevf: Fix unnecessary dereference where local var is available.
  ixgbevf: Remove the ring adapter pointer value
  ixgbevf: Remove checking for mac.ops function pointers
  ixgbevf: Remove mailbox spinlock from the reset function
  ixgbevf: White space and comments clean up
  ixgbevf: Remove unneeded and obsolete comment
  ixgbevf: Add checksum statistics counters to rings
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1ff05fb7 55fb277c
@@ -604,6 +604,23 @@ static int ioat3_dca_count_dca_slots(void *iobase, u16 dca_offset)
 	return slots;
 }
 
+static inline int dca3_tag_map_invalid(u8 *tag_map)
+{
+	/*
+	 * If the tag map is not programmed by the BIOS the default is:
+	 * 0x80 0x80 0x80 0x80 0x80 0x00 0x00 0x00
+	 *
+	 * This is an invalid map and will result in only 2 possible tags:
+	 * 0x1F and 0x00.  0x00 is an invalid DCA tag, so we know that
+	 * this entire definition is invalid.
+	 */
+	return ((tag_map[0] == DCA_TAG_MAP_VALID) &&
+		(tag_map[1] == DCA_TAG_MAP_VALID) &&
+		(tag_map[2] == DCA_TAG_MAP_VALID) &&
+		(tag_map[3] == DCA_TAG_MAP_VALID) &&
+		(tag_map[4] == DCA_TAG_MAP_VALID));
+}
+
 struct dca_provider * __devinit
 ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 {
@@ -674,6 +691,12 @@ ioat3_dca_init(struct pci_dev *pdev, void __iomem *iobase)
 		ioatdca->tag_map[i] = bit & DCA_TAG_MAP_MASK;
 	}
 
+	if (dca3_tag_map_invalid(ioatdca->tag_map)) {
+		dev_err(&pdev->dev, "APICID_TAG_MAP set incorrectly by BIOS, disabling DCA\n");
+		free_dca_provider(dca);
+		return NULL;
+	}
+
 	err = register_dca_provider(dca, &pdev->dev);
 	if (err) {
 		free_dca_provider(dca);
...
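The new dca3_tag_map_invalid() check is self-contained and easy to exercise outside the kernel. A minimal user-space sketch, assuming DCA_TAG_MAP_VALID is 0x80 (the default BIOS byte quoted in the comment above) and using a made-up "programmed" map purely for contrast:

```c
#include <stdio.h>
#include <stdint.h>

#define DCA_TAG_MAP_VALID 0x80	/* assumed value of the kernel macro */

/* Mirror of the check added above: all five entries still at the
 * BIOS default means the map was never programmed and can only
 * produce the tags 0x1F and 0x00. */
static int dca3_tag_map_invalid(const uint8_t *tag_map)
{
	return tag_map[0] == DCA_TAG_MAP_VALID &&
	       tag_map[1] == DCA_TAG_MAP_VALID &&
	       tag_map[2] == DCA_TAG_MAP_VALID &&
	       tag_map[3] == DCA_TAG_MAP_VALID &&
	       tag_map[4] == DCA_TAG_MAP_VALID;
}

int main(void)
{
	uint8_t bios_default[8] = { 0x80, 0x80, 0x80, 0x80, 0x80, 0x00, 0x00, 0x00 };
	uint8_t programmed[8]   = { 0x80, 0x81, 0x82, 0x84, 0x88, 0x00, 0x00, 0x00 };

	printf("bios default invalid: %d\n", dca3_tag_map_invalid(bios_default)); /* prints 1 */
	printf("programmed invalid:   %d\n", dca3_tag_map_invalid(programmed));   /* prints 0 */
	return 0;
}
```

With this check in place, ioat3_dca_init() refuses to register a DCA provider rather than handing out tags that could never steer data correctly.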
@@ -58,7 +58,6 @@ struct ixgbevf_ring {
 	struct ixgbevf_ring *next;
 	struct net_device *netdev;
 	struct device *dev;
-	struct ixgbevf_adapter *adapter;	/* backlink */
 	void *desc;		/* descriptor ring memory */
 	dma_addr_t dma;		/* phys. address of descriptor ring */
 	unsigned int size;	/* length in bytes */
@@ -75,6 +74,8 @@ struct ixgbevf_ring {
 	u64 total_bytes;
 	u64 total_packets;
 	struct u64_stats_sync syncp;
+	u64 hw_csum_rx_error;
+	u64 hw_csum_rx_good;
 	u16 head;
 	u16 tail;
...
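The two u64 counters added to struct ixgbevf_ring replace adapter-global checksum counters: each ring counts locally in the hot path, and ixgbevf_update_stats() (further down in this diff) folds the per-ring values into the adapter totals and zeroes them. A minimal sketch of that aggregation pattern, with simplified stand-in types rather than the driver's real structures:

```c
#include <stdio.h>
#include <stdint.h>

#define NUM_RX_QUEUES 4	/* illustrative queue count */

struct ring  { uint64_t hw_csum_rx_error, hw_csum_rx_good; };
struct stats { uint64_t hw_csum_rx_error, hw_csum_rx_good; };

/* Periodic stats pass: fold ring counters into the totals, then
 * reset the rings so the next pass only sees new events. */
static void update_stats(struct stats *total, struct ring *rx_ring, int nqueues)
{
	for (int i = 0; i < nqueues; i++) {
		total->hw_csum_rx_error += rx_ring[i].hw_csum_rx_error;
		total->hw_csum_rx_good  += rx_ring[i].hw_csum_rx_good;
		rx_ring[i].hw_csum_rx_error = 0;
		rx_ring[i].hw_csum_rx_good  = 0;
	}
}

int main(void)
{
	struct ring rings[NUM_RX_QUEUES] = { { 1, 10 }, { 0, 7 }, { 2, 3 }, { 0, 0 } };
	struct stats total = { 0, 0 };

	update_stats(&total, rings, NUM_RX_QUEUES);
	printf("errors=%llu good=%llu\n",
	       (unsigned long long)total.hw_csum_rx_error,
	       (unsigned long long)total.hw_csum_rx_good); /* errors=3 good=20 */
	return 0;
}
```

Keeping the counters on the ring means the Rx cleanup path touches memory it already owns instead of a shared adapter field.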
@@ -121,7 +121,6 @@ static inline void ixgbevf_release_rx_desc(struct ixgbe_hw *hw,
  * @direction: 0 for Rx, 1 for Tx, -1 for other causes
  * @queue: queue to map the corresponding interrupt to
  * @msix_vector: the vector to map to the corresponding queue
- *
  */
 static void ixgbevf_set_ivar(struct ixgbevf_adapter *adapter, s8 direction,
 			     u8 queue, u8 msix_vector)
@@ -296,12 +295,11 @@ static void ixgbevf_receive_skb(struct ixgbevf_q_vector *q_vector,
 /**
  * ixgbevf_rx_checksum - indicate in skb if hw indicated a good cksum
- * @adapter: address of board private structure
+ * @ring: pointer to Rx descriptor ring structure
  * @status_err: hardware indication of status of receive
  * @skb: skb currently being received and modified
  **/
-static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
-				       struct ixgbevf_ring *ring,
+static inline void ixgbevf_rx_checksum(struct ixgbevf_ring *ring,
 				       u32 status_err, struct sk_buff *skb)
 {
 	skb_checksum_none_assert(skb);
@@ -313,7 +311,7 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
 	/* if IP and error */
 	if ((status_err & IXGBE_RXD_STAT_IPCS) &&
 	    (status_err & IXGBE_RXDADV_ERR_IPE)) {
-		adapter->hw_csum_rx_error++;
+		ring->hw_csum_rx_error++;
 		return;
 	}
@@ -321,13 +319,13 @@ static inline void ixgbevf_rx_checksum(struct ixgbevf_adapter *adapter,
 		return;
 
 	if (status_err & IXGBE_RXDADV_ERR_TCPE) {
-		adapter->hw_csum_rx_error++;
+		ring->hw_csum_rx_error++;
 		return;
 	}
 
 	/* It must be a TCP or UDP packet with a valid checksum */
 	skb->ip_summed = CHECKSUM_UNNECESSARY;
-	adapter->hw_csum_rx_good++;
+	ring->hw_csum_rx_good++;
 }
 /**
@@ -341,15 +339,16 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 	struct pci_dev *pdev = adapter->pdev;
 	union ixgbe_adv_rx_desc *rx_desc;
 	struct ixgbevf_rx_buffer *bi;
-	struct sk_buff *skb;
 	unsigned int i = rx_ring->next_to_use;
 
 	bi = &rx_ring->rx_buffer_info[i];
 
 	while (cleaned_count--) {
 		rx_desc = IXGBEVF_RX_DESC(rx_ring, i);
-		skb = bi->skb;
-		if (!skb) {
+
+		if (!bi->skb) {
+			struct sk_buff *skb;
+
 			skb = netdev_alloc_skb_ip_align(rx_ring->netdev,
 							rx_ring->rx_buf_len);
 			if (!skb) {
@@ -357,8 +356,7 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 				goto no_buffers;
 			}
 			bi->skb = skb;
-		}
-		if (!bi->dma) {
+
 			bi->dma = dma_map_single(&pdev->dev, skb->data,
 						 rx_ring->rx_buf_len,
 						 DMA_FROM_DEVICE);
@@ -380,7 +378,6 @@ static void ixgbevf_alloc_rx_buffers(struct ixgbevf_adapter *adapter,
 no_buffers:
 	if (rx_ring->next_to_use != i) {
 		rx_ring->next_to_use = i;
-
 		ixgbevf_release_rx_desc(&adapter->hw, rx_ring, i);
 	}
 }
@@ -464,7 +461,7 @@ static bool ixgbevf_clean_rx_irq(struct ixgbevf_q_vector *q_vector,
 			goto next_desc;
 		}
 
-		ixgbevf_rx_checksum(adapter, rx_ring, staterr, skb);
+		ixgbevf_rx_checksum(rx_ring, staterr, skb);
 
 		/* probably a little skewed due to removing CRC */
 		total_rx_bytes += skb->len;
@@ -765,7 +762,6 @@ static irqreturn_t ixgbevf_msix_other(int irq, void *data)
 	return IRQ_HANDLED;
 }
 
 /**
  * ixgbevf_msix_clean_rings - single unshared vector rx clean (all queues)
  * @irq: unused
@@ -1150,9 +1146,6 @@ static int ixgbevf_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
 	struct ixgbe_hw *hw = &adapter->hw;
 	int err;
 
-	if (!hw->mac.ops.set_vfta)
-		return -EOPNOTSUPP;
-
 	spin_lock_bh(&adapter->mbx_lock);
 
 	/* add VID to filter table */
@@ -1181,8 +1174,7 @@ static int ixgbevf_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
 	spin_lock_bh(&adapter->mbx_lock);
 
 	/* remove VID from filter table */
-	if (hw->mac.ops.set_vfta)
-		err = hw->mac.ops.set_vfta(hw, vid, 0, false);
+	err = hw->mac.ops.set_vfta(hw, vid, 0, false);
 
 	spin_unlock_bh(&adapter->mbx_lock);
@@ -1228,12 +1220,13 @@ static int ixgbevf_write_uc_addr_list(struct net_device *netdev)
 }
 
 /**
- * ixgbevf_set_rx_mode - Multicast set
+ * ixgbevf_set_rx_mode - Multicast and unicast set
  * @netdev: network interface device structure
  *
  * The set_rx_method entry point is called whenever the multicast address
- * list or the network interface flags are updated. This routine is
- * responsible for configuring the hardware for proper multicast mode.
+ * list, unicast address list or the network interface flags are updated.
+ * This routine is responsible for configuring the hardware for proper
+ * multicast mode and configuring requested unicast filters.
  **/
 static void ixgbevf_set_rx_mode(struct net_device *netdev)
 {
@@ -1243,8 +1236,7 @@ static void ixgbevf_set_rx_mode(struct net_device *netdev)
 	spin_lock_bh(&adapter->mbx_lock);
 
 	/* reprogram multicast list */
-	if (hw->mac.ops.update_mc_addr_list)
-		hw->mac.ops.update_mc_addr_list(hw, netdev);
+	hw->mac.ops.update_mc_addr_list(hw, netdev);
 
 	ixgbevf_write_uc_addr_list(netdev);
@@ -1312,8 +1304,8 @@ static inline void ixgbevf_rx_desc_queue_enable(struct ixgbevf_adapter *adapter,
 			"not set within the polling period\n", rxr);
 	}
 
-	ixgbevf_release_rx_desc(&adapter->hw, &adapter->rx_ring[rxr],
-				(adapter->rx_ring[rxr].count - 1));
+	ixgbevf_release_rx_desc(hw, &adapter->rx_ring[rxr],
+				adapter->rx_ring[rxr].count - 1);
 }
 static void ixgbevf_save_reset_stats(struct ixgbevf_adapter *adapter)
@@ -1414,12 +1406,10 @@ static void ixgbevf_up_complete(struct ixgbevf_adapter *adapter)
 	spin_lock_bh(&adapter->mbx_lock);
 
-	if (hw->mac.ops.set_rar) {
-		if (is_valid_ether_addr(hw->mac.addr))
-			hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
-		else
-			hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
-	}
+	if (is_valid_ether_addr(hw->mac.addr))
+		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	else
+		hw->mac.ops.set_rar(hw, 0, hw->mac.perm_addr, 0);
 
 	spin_unlock_bh(&adapter->mbx_lock);
@@ -1595,7 +1585,6 @@ static void ixgbevf_clean_tx_ring(struct ixgbevf_adapter *adapter,
 		return;
 
 	/* Free all the Tx ring sk_buffs */
-
 	for (i = 0; i < tx_ring->count; i++) {
 		tx_buffer_info = &tx_ring->tx_buffer_info[i];
 		ixgbevf_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
@@ -1691,13 +1680,6 @@ void ixgbevf_reinit_locked(struct ixgbevf_adapter *adapter)
 	while (test_and_set_bit(__IXGBEVF_RESETTING, &adapter->state))
 		msleep(1);
 
-	/*
-	 * Check if PF is up before re-init.  If not then skip until
-	 * later when the PF is up and ready to service requests from
-	 * the VF via mailbox.  If the VF is up and running then the
-	 * watchdog task will continue to schedule reset tasks until
-	 * the PF is up and running.
-	 */
 	ixgbevf_down(adapter);
 	ixgbevf_up(adapter);
@@ -1709,15 +1691,11 @@ void ixgbevf_reset(struct ixgbevf_adapter *adapter)
 	struct ixgbe_hw *hw = &adapter->hw;
 	struct net_device *netdev = adapter->netdev;
 
-	spin_lock_bh(&adapter->mbx_lock);
-
 	if (hw->mac.ops.reset_hw(hw))
 		hw_dbg(hw, "PF still resetting\n");
 	else
 		hw->mac.ops.init_hw(hw);
 
-	spin_unlock_bh(&adapter->mbx_lock);
-
 	if (is_valid_ether_addr(adapter->hw.mac.addr)) {
 		memcpy(netdev->dev_addr, adapter->hw.mac.addr,
 		       netdev->addr_len);
@@ -1768,6 +1746,7 @@ static int ixgbevf_acquire_msix_vectors(struct ixgbevf_adapter *adapter,
 	 */
 		adapter->num_msix_vectors = vectors;
 	}
+
 	return err;
 }
@@ -2064,7 +2043,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 		goto out;
 	}
 	memcpy(adapter->netdev->dev_addr, adapter->hw.mac.addr,
 	       adapter->netdev->addr_len);
 	}
 
 	/* lock to protect mailbox accesses */
@@ -2114,6 +2093,7 @@ static int __devinit ixgbevf_sw_init(struct ixgbevf_adapter *adapter)
 void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 {
 	struct ixgbe_hw *hw = &adapter->hw;
+	int i;
 
 	UPDATE_VF_COUNTER_32bit(IXGBE_VFGPRC, adapter->stats.last_vfgprc,
 				adapter->stats.vfgprc);
@@ -2127,6 +2107,15 @@ void ixgbevf_update_stats(struct ixgbevf_adapter *adapter)
 				adapter->stats.vfgotc);
 	UPDATE_VF_COUNTER_32bit(IXGBE_VFMPRC, adapter->stats.last_vfmprc,
 				adapter->stats.vfmprc);
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->hw_csum_rx_error +=
+			adapter->rx_ring[i].hw_csum_rx_error;
+		adapter->hw_csum_rx_good +=
+			adapter->rx_ring[i].hw_csum_rx_good;
+		adapter->rx_ring[i].hw_csum_rx_error = 0;
+		adapter->rx_ring[i].hw_csum_rx_good = 0;
+	}
 }
 /**
@@ -2201,6 +2190,7 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 	struct ixgbe_hw *hw = &adapter->hw;
 	u32 link_speed = adapter->link_speed;
 	bool link_up = adapter->link_up;
+	s32 need_reset;
 
 	adapter->flags |= IXGBE_FLAG_IN_WATCHDOG_TASK;
@@ -2208,29 +2198,19 @@ static void ixgbevf_watchdog_task(struct work_struct *work)
 	 * Always check the link on the watchdog because we have
 	 * no LSC interrupt
 	 */
-	if (hw->mac.ops.check_link) {
-		s32 need_reset;
-
-		spin_lock_bh(&adapter->mbx_lock);
-
-		need_reset = hw->mac.ops.check_link(hw, &link_speed,
-						    &link_up, false);
-
-		spin_unlock_bh(&adapter->mbx_lock);
-
-		if (need_reset) {
-			adapter->link_up = link_up;
-			adapter->link_speed = link_speed;
-			netif_carrier_off(netdev);
-			netif_tx_stop_all_queues(netdev);
-			schedule_work(&adapter->reset_task);
-			goto pf_has_reset;
-		}
-	} else {
-		/* always assume link is up, if no check link
-		 * function */
-		link_speed = IXGBE_LINK_SPEED_10GB_FULL;
-		link_up = true;
+	spin_lock_bh(&adapter->mbx_lock);
+
+	need_reset = hw->mac.ops.check_link(hw, &link_speed, &link_up, false);
+
+	spin_unlock_bh(&adapter->mbx_lock);
+
+	if (need_reset) {
+		adapter->link_up = link_up;
+		adapter->link_speed = link_speed;
+		netif_carrier_off(netdev);
+		netif_tx_stop_all_queues(netdev);
+		schedule_work(&adapter->reset_task);
+		goto pf_has_reset;
 	}
 
 	adapter->link_up = link_up;
 	adapter->link_speed = link_speed;
@@ -2723,9 +2703,6 @@ static int ixgbevf_tso(struct ixgbevf_ring *tx_ring,
 static bool ixgbevf_tx_csum(struct ixgbevf_ring *tx_ring,
 			    struct sk_buff *skb, u32 tx_flags)
 {
-
 	u32 vlan_macip_lens = 0;
 	u32 mss_l4len_idx = 0;
 	u32 type_tucmd = 0;
@@ -2915,7 +2892,6 @@ static void ixgbevf_tx_queue(struct ixgbevf_ring *tx_ring, int tx_flags,
 		olinfo_status |= (1 << IXGBE_ADVTXD_IDX_SHIFT);
 
 		if (tx_flags & IXGBE_TX_FLAGS_IPV4)
 			olinfo_status |= IXGBE_ADVTXD_POPTS_IXSM;
 	}
 
 	/*
@@ -3070,8 +3046,7 @@ static int ixgbevf_set_mac(struct net_device *netdev, void *p)
 	spin_lock_bh(&adapter->mbx_lock);
 
-	if (hw->mac.ops.set_rar)
-		hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
+	hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0);
 
 	spin_unlock_bh(&adapter->mbx_lock);
@@ -3396,10 +3371,6 @@ static int __devinit ixgbevf_probe(struct pci_dev *pdev,
 	if (err)
 		goto err_sw_init;
 
-	/* pick up the PCI bus settings for reporting later */
-	if (hw->mac.ops.get_bus_info)
-		hw->mac.ops.get_bus_info(hw);
-
 	strcpy(netdev->name, "eth%d");
 
 	err = register_netdev(netdev);
...
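A recurring cleanup across these patches (set_vfta, update_mc_addr_list, set_rar, check_link, get_bus_info) is dropping the if (hw->mac.ops.X) guards before indirect calls. A minimal sketch of the underlying assumption, namely that the VF's ops table is fully populated when the device is initialized; the names here are illustrative, not the driver's actual init path:

```c
#include <stdio.h>

struct mac_ops {
	int (*set_vfta)(int vid, int on);	/* hypothetical single-op table */
};

static int vf_set_vfta(int vid, int on)
{
	printf("set_vfta vid=%d on=%d\n", vid, on);
	return 0;
}

/* Populated unconditionally at init time, the way ixgbevf fills
 * hw->mac.ops, so every call site can invoke the op directly. */
static const struct mac_ops ops = { .set_vfta = vf_set_vfta };

int main(void)
{
	return ops.set_vfta(100, 1);	/* no "if (ops.set_vfta)" guard needed */
}
```

If an op could legitimately be absent, the guard would still be required; the cleanup presumes every supported VF MAC type provides the full table.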