Commit 72ec301a authored by David S. Miller

Merge branch 'master' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/net-next

Jeff Kirsher says:

====================
This series contains updates to ixgbe and igb.

Alexander Duyck (13):
  ixgbe: Initialize q_vector cpu and affinity masks correctly
  ixgbe: Enable jumbo frames support w/ SR-IOV
  ixgbe: Move message handling routines into their own functions
  ixgbe: Add mailbox API version negotiation support to ixgbe PF
  igb: Split Rx timestamping into two separate functions
  igb: Do not use header split, instead receive all frames into a
    single buffer
  igb: Combine post-processing of skb into a single function
  igb: Map entire page and sync half instead of mapping and unmapping
    half pages
  igb: Move rx_buffer related code in Rx cleanup path into separate
    function
  igb: Lock buffer size at 2K even on systems with larger pages
  igb: Combine q_vector and ring allocation into a single function
  igb: Move the calls to set the Tx and Rx queues into igb_open
  igb: Split igb_update_dca into separate Tx and Rx functions

Tushar Dave (1):
  igb: Correcting and improving small packet check and padding
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 1b6f0f92 6a05004a
@@ -172,10 +172,13 @@ struct e1000_adv_tx_context_desc {
 #define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
 #define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
 #define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
 
 #define E1000_DCA_TXCTRL_CPUID_MASK  0x0000001F /* Tx CPUID Mask */
 #define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
 #define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
 
 /* Additional DCA related definitions, note change in position of CPUID */
 #define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
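The three new bits toggle PCIe relaxed-ordering behavior for different phases of a descriptor transaction (descriptor reads, descriptor writeback, payload reads). A minimal sketch (hypothetical helper, not part of this series) of folding them into a DCA_TXCTRL register value:

```c
/* Hypothetical helper, not from this series: OR the new relaxed-ordering
 * bits into a Tx DCA control register value before it is written back.
 */
static u32 igb_txctrl_relax_order(u32 txctrl)
{
	txctrl |= E1000_DCA_TXCTRL_DESC_RRO_EN |  /* relax descriptor reads */
		  E1000_DCA_TXCTRL_TX_WB_RO_EN |  /* relax desc writeback */
		  E1000_DCA_TXCTRL_DATA_RRO_EN;   /* relax payload reads */
	return txctrl;
}
```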
...
@@ -132,9 +132,10 @@ struct vf_data_storage {
 #define MAXIMUM_ETHERNET_VLAN_SIZE 1522
 
 /* Supported Rx Buffer Sizes */
 #define IGB_RXBUFFER_256   256
-#define IGB_RXBUFFER_16384 16384
+#define IGB_RXBUFFER_2048  2048
 #define IGB_RX_HDR_LEN     IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ       IGB_RXBUFFER_2048
 
 /* How many Tx Descriptors do we need to call netif_wake_queue ? */
 #define IGB_TX_QUEUE_WAKE 16
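Locking the buffer size at 2K is what lets igb treat a 4K page as two independent half-page buffers. A rough sketch of the resulting offset flip (simplified from the page-reuse logic added later in the series, and assuming PAGE_SIZE is 4096):

```c
/* Sketch, assuming PAGE_SIZE == 4096: with IGB_RX_BUFSZ fixed at 2K,
 * XOR-ing the offset alternates between the two halves of a page, so
 * the half just handed up the stack can rest while the other half is
 * given back to the hardware.
 */
static void igb_flip_rx_buffer(struct igb_rx_buffer *rx_buffer)
{
	rx_buffer->page_offset ^= IGB_RX_BUFSZ;	/* 0 <-> 2048 */
}
```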
@@ -174,11 +175,9 @@ struct igb_tx_buffer {
 };
 
 struct igb_rx_buffer {
-	struct sk_buff *skb;
 	dma_addr_t dma;
 	struct page *page;
-	dma_addr_t page_dma;
-	u32 page_offset;
+	unsigned int page_offset;
 };
 
 struct igb_tx_queue_stats {
@@ -205,22 +204,6 @@ struct igb_ring_container {
 	u8 itr;				/* current ITR setting for ring */
 };
 
-struct igb_q_vector {
-	struct igb_adapter *adapter;	/* backlink */
-	int cpu;			/* CPU for DCA */
-	u32 eims_value;			/* EIMS mask value */
-
-	struct igb_ring_container rx, tx;
-
-	struct napi_struct napi;
-
-	u16 itr_val;
-	u8 set_itr;
-	void __iomem *itr_register;
-
-	char name[IFNAMSIZ + 9];
-};
-
 struct igb_ring {
 	struct igb_q_vector *q_vector;	/* backlink to q_vector */
 	struct net_device *netdev;	/* back pointer to net_device */
@@ -232,15 +215,17 @@ struct igb_ring {
 	void *desc;			/* descriptor ring memory */
 	unsigned long flags;		/* ring specific flags */
 	void __iomem *tail;		/* pointer to ring tail register */
+	dma_addr_t dma;			/* phys address of the ring */
+	unsigned int size;		/* length of desc. ring in bytes */
 
 	u16 count;			/* number of desc. in the ring */
 	u8 queue_index;			/* logical index of the ring*/
 	u8 reg_idx;			/* physical index of the ring */
-	u32 size;			/* length of desc. ring in bytes */
 
 	/* everything past this point are written often */
-	u16 next_to_clean ____cacheline_aligned_in_smp;
+	u16 next_to_clean;
 	u16 next_to_use;
+	u16 next_to_alloc;
 
 	union {
 		/* TX */
@@ -251,12 +236,30 @@ struct igb_ring {
 		};
 		/* RX */
 		struct {
+			struct sk_buff *skb;
 			struct igb_rx_queue_stats rx_stats;
 			struct u64_stats_sync rx_syncp;
 		};
 	};
-	/* Items past this point are only used during ring alloc / free */
-	dma_addr_t dma;			/* phys address of the ring */
-};
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+	struct igb_adapter *adapter;	/* backlink */
+	int cpu;			/* CPU for DCA */
+	u32 eims_value;			/* EIMS mask value */
+
+	u16 itr_val;
+	u8 set_itr;
+	void __iomem *itr_register;
+
+	struct igb_ring_container rx, tx;
+
+	struct napi_struct napi;
+
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
 
 enum e1000_ring_flags_t {
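The zero-length ring[0] member at the tail of the relocated igb_q_vector is the hook for the "combine q_vector and ring allocation" patch: vector and rings now come from a single allocation. A condensed sketch of the pattern (the real igb_alloc_q_vector() in igb_main.c also takes queue counts/indices and initializes each ring):

```c
/* Condensed sketch of the single-allocation pattern; error handling and
 * ring setup from the real igb_alloc_q_vector() are omitted.
 */
static struct igb_q_vector *igb_alloc_q_vector_sketch(int ring_count)
{
	struct igb_q_vector *q_vector;
	int size = sizeof(struct igb_q_vector) +
		   ring_count * sizeof(struct igb_ring);

	/* one kzalloc covers the vector and its trailing rings */
	q_vector = kzalloc(size, GFP_KERNEL);
	if (!q_vector)
		return NULL;

	/* rings are then addressed as &q_vector->ring[0..ring_count-1] */
	return q_vector;
}
```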
@@ -442,9 +445,20 @@ extern void igb_ptp_stop(struct igb_adapter *adapter);
 extern void igb_ptp_reset(struct igb_adapter *adapter);
 extern void igb_ptp_tx_work(struct work_struct *work);
 extern void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter);
-extern void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
-				union e1000_adv_rx_desc *rx_desc,
+extern void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 				struct sk_buff *skb);
+extern void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+				unsigned char *va,
+				struct sk_buff *skb);
+static inline void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
+				       union e1000_adv_rx_desc *rx_desc,
+				       struct sk_buff *skb)
+{
+	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TS) &&
+	    !igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
+		igb_ptp_rx_rgtstamp(q_vector, skb);
+}
 extern int igb_ptp_hwtstamp_ioctl(struct net_device *netdev,
 				  struct ifreq *ifr, int cmd);
 #endif /* CONFIG_IGB_PTP */
...
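The split leaves the Rx path with two cheap cases. A hedged sketch of how a caller is expected to use the pair (the real call sites are in igb_main.c, whose diff is collapsed below; the wrapper name here is hypothetical):

```c
/* Sketch of the intended division of labor; not a verbatim call site.
 * TSIP set -> the timestamp was prepended to the packet buffer itself,
 * so use igb_ptp_rx_pktstamp(); otherwise the inline helper above checks
 * TS and falls through to the register-based igb_ptp_rx_rgtstamp().
 */
static void igb_rx_timestamp_sketch(struct igb_q_vector *q_vector,
				    union e1000_adv_rx_desc *rx_desc,
				    struct sk_buff *skb)
{
	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP))
		igb_ptp_rx_pktstamp(q_vector, skb->data, skb);
	else
		igb_ptp_rx_hwtstamp(q_vector, rx_desc, skb);
}
```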
@@ -37,6 +37,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/pm_runtime.h>
+#include <linux/highmem.h>
 #include "igb.h"
@@ -1685,16 +1686,24 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
 	memset(&skb->data[frame_size + 12], 0xAF, 1);
 }
 
-static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+static int igb_check_lbtest_frame(struct igb_rx_buffer *rx_buffer,
+				  unsigned int frame_size)
 {
-	frame_size /= 2;
-	if (*(skb->data + 3) == 0xFF) {
-		if ((*(skb->data + frame_size + 10) == 0xBE) &&
-		    (*(skb->data + frame_size + 12) == 0xAF)) {
-			return 0;
-		}
-	}
-	return 13;
+	unsigned char *data;
+	bool match = true;
+
+	frame_size >>= 1;
+
+	data = kmap(rx_buffer->page);
+
+	if (data[3] != 0xFF ||
+	    data[frame_size + 10] != 0xBE ||
+	    data[frame_size + 12] != 0xAF)
+		match = false;
+
+	kunmap(rx_buffer->page);
+
+	return match;
 }
 
 static int igb_clean_test_rings(struct igb_ring *rx_ring,
@@ -1704,9 +1713,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 	union e1000_adv_rx_desc *rx_desc;
 	struct igb_rx_buffer *rx_buffer_info;
 	struct igb_tx_buffer *tx_buffer_info;
-	struct netdev_queue *txq;
 	u16 rx_ntc, tx_ntc, count = 0;
-	unsigned int total_bytes = 0, total_packets = 0;
 
 	/* initialize next to clean and descriptor values */
 	rx_ntc = rx_ring->next_to_clean;
@@ -1717,21 +1724,24 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		/* check rx buffer */
 		rx_buffer_info = &rx_ring->rx_buffer_info[rx_ntc];
 
-		/* unmap rx buffer, will be remapped by alloc_rx_buffers */
-		dma_unmap_single(rx_ring->dev,
-				 rx_buffer_info->dma,
-				 IGB_RX_HDR_LEN,
-				 DMA_FROM_DEVICE);
-		rx_buffer_info->dma = 0;
+		/* sync Rx buffer for CPU read */
+		dma_sync_single_for_cpu(rx_ring->dev,
+					rx_buffer_info->dma,
+					IGB_RX_BUFSZ,
+					DMA_FROM_DEVICE);
 
 		/* verify contents of skb */
-		if (!igb_check_lbtest_frame(rx_buffer_info->skb, size))
+		if (igb_check_lbtest_frame(rx_buffer_info, size))
 			count++;
 
+		/* sync Rx buffer for device write */
+		dma_sync_single_for_device(rx_ring->dev,
+					   rx_buffer_info->dma,
+					   IGB_RX_BUFSZ,
+					   DMA_FROM_DEVICE);
+
 		/* unmap buffer on tx side */
 		tx_buffer_info = &tx_ring->tx_buffer_info[tx_ntc];
-		total_bytes += tx_buffer_info->bytecount;
-		total_packets += tx_buffer_info->gso_segs;
 		igb_unmap_and_free_tx_resource(tx_ring, tx_buffer_info);
 
 		/* increment rx/tx next to clean counters */
@@ -1746,8 +1756,7 @@ static int igb_clean_test_rings(struct igb_ring *rx_ring,
 		rx_desc = IGB_RX_DESC(rx_ring, rx_ntc);
 	}
 
-	txq = netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index);
-	netdev_tx_completed_queue(txq, total_packets, total_bytes);
+	netdev_tx_reset_queue(txring_txq(tx_ring));
 
 	/* re-map buffers to ring, store next to clean values */
 	igb_alloc_rx_buffers(rx_ring, count);
...
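The replacement of dma_unmap_single() with the two sync calls above is the heart of the Rx rework: buffers now stay mapped for the life of the ring, and only ownership moves between CPU and device. The generic shape of the pattern, in sketch form (assuming a buffer mapped once with dma_map_page() at ring setup):

```c
/* Map-once/sync-per-use pattern.  The buffer is assumed to have been
 * mapped with dma_map_page() when the ring was originally filled.
 */
dma_sync_single_for_cpu(rx_ring->dev, rx_buffer_info->dma,
			IGB_RX_BUFSZ, DMA_FROM_DEVICE);

/* ... the CPU may now safely read the received frame ... */

dma_sync_single_for_device(rx_ring->dev, rx_buffer_info->dma,
			   IGB_RX_BUFSZ, DMA_FROM_DEVICE);
/* the buffer is handed back to the hardware without being unmapped */
```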
This diff is collapsed.
@@ -441,18 +441,46 @@ void igb_ptp_tx_hwtstamp(struct igb_adapter *adapter)
 	adapter->ptp_tx_skb = NULL;
 }
 
-void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
-			 union e1000_adv_rx_desc *rx_desc,
+/**
+ * igb_ptp_rx_pktstamp - retrieve Rx per packet timestamp
+ * @q_vector: Pointer to interrupt specific structure
+ * @va: Pointer to address containing Rx buffer
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the first buffer of an
+ * incoming frame.  The value is stored in little endian format starting on
+ * byte 8.
+ */
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector,
+			 unsigned char *va,
+			 struct sk_buff *skb)
+{
+	u64 *regval = (u64 *)va;
+
+	/*
+	 * The timestamp is recorded in little endian format.
+	 * DWORD: 0        1        2        3
+	 * Field: Reserved Reserved SYSTIML  SYSTIMH
+	 */
+	igb_ptp_systim_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb),
+				   le64_to_cpu(regval[1]));
+}
+
+/**
+ * igb_ptp_rx_rgtstamp - retrieve Rx timestamp stored in register
+ * @q_vector: Pointer to interrupt specific structure
+ * @skb: Buffer containing timestamp and packet
+ *
+ * This function is meant to retrieve a timestamp from the internal registers
+ * of the adapter and store it in the skb.
+ */
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector,
 			 struct sk_buff *skb)
 {
 	struct igb_adapter *adapter = q_vector->adapter;
 	struct e1000_hw *hw = &adapter->hw;
 	u64 regval;
 
-	if (!igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP |
-			      E1000_RXDADV_STAT_TS))
-		return;
-
 	/*
 	 * If this bit is set, then the RX registers contain the time stamp. No
 	 * other packet will be time stamped until we read these registers, so
@@ -464,18 +492,11 @@ void igb_ptp_rx_hwtstamp(struct igb_q_vector *q_vector,
 	 * If nothing went wrong, then it should have a shared tx_flags that we
 	 * can turn into a skb_shared_hwtstamps.
 	 */
-	if (igb_test_staterr(rx_desc, E1000_RXDADV_STAT_TSIP)) {
-		u32 *stamp = (u32 *)skb->data;
-		regval = le32_to_cpu(*(stamp + 2));
-		regval |= (u64)le32_to_cpu(*(stamp + 3)) << 32;
-		skb_pull(skb, IGB_TS_HDR_LEN);
-	} else {
-		if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
-			return;
+	if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
+		return;
 
-		regval = rd32(E1000_RXSTMPL);
-		regval |= (u64)rd32(E1000_RXSTMPH) << 32;
-	}
+	regval = rd32(E1000_RXSTMPL);
+	regval |= (u64)rd32(E1000_RXSTMPH) << 32;
 
 	igb_ptp_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
 }
...
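A worked illustration of the buffer layout that igb_ptp_rx_pktstamp() decodes (byte indices from the comment above; the helper name here is hypothetical):

```c
/* The first 16 bytes of a TSIP-stamped buffer:
 *   va[0..7]   reserved
 *   va[8..11]  SYSTIML (little endian)
 *   va[12..15] SYSTIMH (little endian)
 * Treating va as two little-endian 64-bit words, word 1 is the stamp.
 */
static u64 igb_ts_from_buffer(unsigned char *va)
{
	u64 *regval = (u64 *)va;

	return le64_to_cpu(regval[1]);	/* ((u64)SYSTIMH << 32) | SYSTIML */
}
```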
@@ -135,6 +135,7 @@ struct vf_data_storage {
 	u16 tx_rate;
 	u16 vlan_count;
 	u8 spoofchk_enabled;
+	unsigned int vf_api;
 };
 
 struct vf_macvlans {
...
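The new vf_api field is where the PF records what each VF negotiated. A hedged approximation of the PF-side handler (the real ixgbe_negotiate_vf_api() added to ixgbe_sriov.c in this series carries logging and is the authoritative version):

```c
/* Hedged approximation of the PF-side negotiation handler; see
 * ixgbe_sriov.c in this series for the real ixgbe_negotiate_vf_api().
 */
static int ixgbe_negotiate_vf_api_sketch(struct ixgbe_adapter *adapter,
					 u32 *msgbuf, u32 vf)
{
	int api = msgbuf[1];	/* requested version rides in word 1 */

	switch (api) {
	case ixgbe_mbox_api_10:
		adapter->vfinfo[vf].vf_api = api;
		return 0;
	default:
		break;
	}

	adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_unknown;
	return -1;
}
```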
@@ -800,6 +800,10 @@ int ixgbe_fcoe_enable(struct net_device *netdev)
 		return -EINVAL;
 
 	e_info(drv, "Enabling FCoE offload features.\n");
+
+	if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)
+		e_warn(probe, "Enabling FCoE on PF will disable legacy VFs\n");
+
 	if (netif_running(netdev))
 		netdev->netdev_ops->ndo_stop(netdev);
...
@@ -802,10 +802,13 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter,
 	/* setup affinity mask and node */
 	if (cpu != -1)
 		cpumask_set_cpu(cpu, &q_vector->affinity_mask);
-	else
-		cpumask_copy(&q_vector->affinity_mask, cpu_online_mask);
 	q_vector->numa_node = node;
 
+#ifdef CONFIG_IXGBE_DCA
+	/* initialize CPU for DCA */
+	q_vector->cpu = -1;
+
+#endif
 	/* initialize NAPI */
 	netif_napi_add(adapter->netdev, &q_vector->napi,
 		       ixgbe_poll, 64);
...
@@ -3263,6 +3263,11 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter)
 		max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE;
 
 #endif /* IXGBE_FCOE */
+
+	/* adjust max frame to be at least the size of a standard frame */
+	if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN))
+		max_frame = (ETH_FRAME_LEN + ETH_FCS_LEN);
+
 	mhadd = IXGBE_READ_REG(hw, IXGBE_MHADD);
 	if (max_frame != (mhadd >> IXGBE_MHADD_MFS_SHIFT)) {
 		mhadd &= ~IXGBE_MHADD_MFS_MASK;
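(For reference, ETH_FRAME_LEN is 1514 and ETH_FCS_LEN is 4, so the floor enforced here is 1518 bytes: a maximum-size untagged Ethernet frame including the CRC.)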
@@ -4828,14 +4833,14 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu)
 		return -EINVAL;
 
 	/*
-	 * For 82599EB we cannot allow PF to change MTU greater than 1500
-	 * in SR-IOV mode as it may cause buffer overruns in guest VFs that
-	 * don't allocate and chain buffers correctly.
+	 * For 82599EB we cannot allow legacy VFs to enable their receive
+	 * paths when MTU greater than 1500 is configured.  So display a
+	 * warning that legacy VFs will be disabled.
 	 */
 	if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) &&
 	    (adapter->hw.mac.type == ixgbe_mac_82599EB) &&
 	    (max_frame > MAXIMUM_ETHERNET_VLAN_SIZE))
-		return -EINVAL;
+		e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n");
 
 	e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu);
...
@@ -62,12 +62,29 @@
 /* bits 23:16 are used for exra info for certain messages */
 #define IXGBE_VT_MSGINFO_MASK     (0xFF << IXGBE_VT_MSGINFO_SHIFT)
 
+/* definitions to support mailbox API version negotiation */
+
+/*
+ * Each element denotes a version of the API; existing numbers may not
+ * change; any additions must go at the end
+ */
+enum ixgbe_pfvf_api_rev {
+	ixgbe_mbox_api_10,	/* API version 1.0, linux/freebsd VF driver */
+	ixgbe_mbox_api_20,	/* API version 2.0, solaris Phase1 VF driver */
+	/* This value should always be last */
+	ixgbe_mbox_api_unknown,	/* indicates that API version is not known */
+};
+
+/* mailbox API, legacy requests */
 #define IXGBE_VF_RESET         0x01 /* VF requests reset */
 #define IXGBE_VF_SET_MAC_ADDR  0x02 /* VF requests PF to set MAC addr */
 #define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */
 #define IXGBE_VF_SET_VLAN      0x04 /* VF requests PF to set VLAN */
-#define IXGBE_VF_SET_LPE       0x05 /* VF requests PF to set VMOLR.LPE */
-#define IXGBE_VF_SET_MACVLAN   0x06 /* VF requests PF for unicast filter */
+
+/* mailbox API, version 1.0 VF requests */
+#define IXGBE_VF_SET_LPE	0x05 /* VF requests PF to set VMOLR.LPE */
+#define IXGBE_VF_SET_MACVLAN	0x06 /* VF requests PF for unicast filter */
+#define IXGBE_VF_API_NEGOTIATE	0x08 /* negotiate API version */
 
 /* length of permanent address message returned from PF */
 #define IXGBE_VF_PERMADDR_MSG_LEN 4
...
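For the other end of the wire, a hedged sketch of how a VF driver might use the new IXGBE_VF_API_NEGOTIATE opcode (the VF side is not part of this PF series; the mailbox op names follow the standard VF mbx vtable, and the function name is hypothetical):

```c
/* Hedged VF-side sketch; the ixgbevf driver gained its own helper for
 * this separately.  Assumes the standard VF mailbox ops.
 */
static s32 ixgbevf_negotiate_api_sketch(struct ixgbe_hw *hw, int api)
{
	u32 msg[3];
	s32 err;

	msg[0] = IXGBE_VF_API_NEGOTIATE;
	msg[1] = api;
	msg[2] = 0;

	err = hw->mbx.ops.write_posted(hw, msg, 3);
	if (!err)
		err = hw->mbx.ops.read_posted(hw, msg, 3);
	if (err)
		return err;

	/* the PF answers with the opcode plus an ACK or NACK bit */
	if (msg[0] & IXGBE_VT_MSGTYPE_ACK)
		return 0;

	return IXGBE_ERR_MBX;
}
```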