Commit 117ce394 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
Intel Wired LAN Driver Updates 2015-12-01

This series contains updates to i40e and i40evf only.

Helin adds new fields to i40e_vsi to store user-configured RSS data and
the code to use it.  Also renames RSS items to clarify their functionality
and scope to users, and fixes a confusing kernel message by reporting the
enabled RSS size together with the hardware maximum RSS size.
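
As a sketch of the resulting pattern (excerpted from the i40e_pf_config_rss()
hunk below), the driver now prefers the cached user configuration and falls
back to generated defaults:

    /* Use user configured lut if there is one, otherwise use default */
    if (vsi->rss_lut_user)
        memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
    else
        i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

    /* Likewise for the hash key */
    if (vsi->rss_hkey_user)
        memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
    else
        netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);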

Anjali fixes an issue where we forced descriptor write-backs too often,
which kept us from getting the full benefit of NAPI.
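
In sketch form (distilled from the i40evf_tx_map() hunk below; "txq" here
is shorthand for netdev_get_tx_queue(tx_ring->netdev, tx_ring->queue_index)),
the Tx path now requests a descriptor write-back (RS bit) only when needed:

    if (skb->xmit_more && !netif_xmit_stopped(txq)) {
        tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
        tail_bump = false;              /* more frames are coming */
    } else if (!skb->xmit_more && !netif_xmit_stopped(txq) &&
               !(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET) &&
               tx_ring->packet_stride < WB_STRIDE &&
               desc_count < WB_STRIDE) {
        tx_ring->packet_stride++;       /* defer RS for small packets */
    } else {
        tx_ring->packet_stride = 0;
        tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
        do_rs = true;                   /* ask hardware to write back */
    }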

Jesse adds a prefetch of packet data early in the transmit path, which
helps immensely for pktgen and forwarding workloads.  Also fixes a case
where the i40e driver could sleep inside a critical section.
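
The prefetch itself is a one-liner at the top of the xmit hot path (see the
i40e_xmit_frame_ring() hunks below):

    /* prefetch the data, we'll need it later */
    prefetch(skb->data);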

Carolyn fixes an issue where adminq init failures always produced a
message claiming the NVM was newer than expected, which is not always the
cause of init_adminq failures.  Fixed by checking for that specific error
condition and printing a different, more helpful message otherwise.
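
Sketch of the resulting probe-time check (messages abbreviated here; the
full strings are in the i40e_probe() hunk below):

    err = i40e_init_adminq(hw);
    if (err) {
        if (err == I40E_ERR_FIRMWARE_API_VERSION)
            dev_info(&pdev->dev, "...NVM image is newer than expected...");
        else
            dev_info(&pdev->dev, "...firmware failed to init...");
        goto err_pf_reset;
    }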

Mitch fixes an error message by telling the user which VF is being
naughty, rather than making them guess.  Also updates the queue vector
array from a statically-sized member of the adapter structure to a
dynamically-allocated and -sized array.  This reduces the size of the
adapter structure and allows us to support any number of queue vectors in
the future without changing the code.
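
The rings follow the same pattern (from the i40evf_alloc_queues() hunk
below), and presumably the q_vectors array is allocated the same way:

    adapter->tx_rings = kcalloc(adapter->num_active_queues,
                                sizeof(struct i40e_ring), GFP_KERNEL);
    if (!adapter->tx_rings)
        goto err_out;

Per-queue accesses then become plain indexing, e.g. adapter->tx_rings[i].tail
instead of adapter->tx_rings[i]->tail.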
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 6cc56834 17652c63
@@ -266,7 +266,7 @@ struct i40e_pf {
     u16 num_lan_qps;    /* num lan queues this PF has set up */
     u16 num_lan_msix;   /* num queue vectors for the base PF vsi */
     int queues_left;    /* queues left unclaimed */
-    u16 rss_size;       /* num queues in the RSS array */
+    u16 alloc_rss_size; /* allocated RSS queues */
     u16 rss_size_max;   /* HW defined max RSS queues */
     u16 fdir_pf_filter_count; /* num of guaranteed filters for this PF */
     u16 num_alloc_vsi;  /* num VSIs this driver supports */
@@ -413,7 +413,7 @@ struct i40e_pf {
     u32 rx_hwtstamp_cleared;
     bool ptp_tx;
     bool ptp_rx;
-    u16 rss_table_size;
+    u16 rss_table_size; /* HW RSS table size */
     /* These are only valid in NPAR modes */
     u32 npar_max_bw;
     u32 npar_min_bw;
@@ -506,8 +506,10 @@ struct i40e_vsi {
     u16 tx_itr_setting;
     u16 int_rate_limit; /* value in usecs */
-    u16 rss_table_size;
-    u16 rss_size;
+    u16 rss_table_size; /* HW RSS table size */
+    u16 rss_size;       /* Allocated RSS queues */
+    u8 *rss_hkey_user;  /* User configured hash keys */
+    u8 *rss_lut_user;   /* User configured lookup table entries */
     u16 max_frame;
     u16 rx_hdr_len;
@@ -695,7 +697,7 @@ struct i40e_mac_filter *i40e_add_filter(struct i40e_vsi *vsi,
                     bool is_vf, bool is_netdev);
 void i40e_del_filter(struct i40e_vsi *vsi, u8 *macaddr, s16 vlan,
              bool is_vf, bool is_netdev);
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl);
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi);
 struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                 u16 uplink, u32 param1);
 int i40e_vsi_release(struct i40e_vsi *vsi);
...
@@ -1138,7 +1138,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
         spin_lock_bh(&vsi->mac_filter_list_lock);
         f = i40e_add_filter(vsi, ma, vlan, false, false);
         spin_unlock_bh(&vsi->mac_filter_list_lock);
-        ret = i40e_sync_vsi_filters(vsi, true);
+        ret = i40e_sync_vsi_filters(vsi);
         if (f && !ret)
             dev_info(&pf->pdev->dev,
                  "add macaddr: %pM vlan=%d added to VSI %d\n",
@@ -1177,7 +1177,7 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
         spin_lock_bh(&vsi->mac_filter_list_lock);
         i40e_del_filter(vsi, ma, vlan, false, false);
         spin_unlock_bh(&vsi->mac_filter_list_lock);
-        ret = i40e_sync_vsi_filters(vsi, true);
+        ret = i40e_sync_vsi_filters(vsi);
         if (!ret)
             dev_info(&pf->pdev->dev,
                  "del macaddr: %pM vlan=%d removed from VSI %d\n",
...
@@ -2651,10 +2651,8 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
 {
     struct i40e_netdev_priv *np = netdev_priv(netdev);
     struct i40e_vsi *vsi = np->vsi;
-    u8 seed_def[I40E_HKEY_ARRAY_SIZE];
-    u8 *lut, *seed = NULL;
+    u8 *seed = NULL;
     u16 i;
-    int ret;

     if (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)
         return -EOPNOTSUPP;
@@ -2663,18 +2661,27 @@ static int i40e_set_rxfh(struct net_device *netdev, const u32 *indir,
         return 0;

     if (key) {
-        memcpy(seed_def, key, I40E_HKEY_ARRAY_SIZE);
-        seed = seed_def;
+        if (!vsi->rss_hkey_user) {
+            vsi->rss_hkey_user = kzalloc(I40E_HKEY_ARRAY_SIZE,
+                             GFP_KERNEL);
+            if (!vsi->rss_hkey_user)
+                return -ENOMEM;
+        }
+        memcpy(vsi->rss_hkey_user, key, I40E_HKEY_ARRAY_SIZE);
+        seed = vsi->rss_hkey_user;
     }
-
-    lut = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
-    if (!lut)
-        return -ENOMEM;
+    if (!vsi->rss_lut_user) {
+        vsi->rss_lut_user = kzalloc(I40E_HLUT_ARRAY_SIZE, GFP_KERNEL);
+        if (!vsi->rss_lut_user)
+            return -ENOMEM;
+    }

+    /* Each 32 bits pointed by 'indir' is stored with a lut entry */
     for (i = 0; i < I40E_HLUT_ARRAY_SIZE; i++)
-        lut[i] = (u8)(indir[i]);
-    ret = i40e_config_rss(vsi, seed, lut, I40E_HLUT_ARRAY_SIZE);
-    kfree(lut);
+        vsi->rss_lut_user[i] = (u8)(indir[i]);

-    return ret;
+    return i40e_config_rss(vsi, seed, vsi->rss_lut_user,
+                   I40E_HLUT_ARRAY_SIZE);
 }

 /**
...
@@ -39,7 +39,7 @@ static const char i40e_driver_string[] =

 #define DRV_VERSION_MAJOR 1
 #define DRV_VERSION_MINOR 4
-#define DRV_VERSION_BUILD 2
+#define DRV_VERSION_BUILD 4
 #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
          __stringify(DRV_VERSION_MINOR) "." \
          __stringify(DRV_VERSION_BUILD) DRV_KERN
@@ -1552,9 +1552,11 @@ static int i40e_set_mac(struct net_device *netdev, void *p)
         spin_unlock_bh(&vsi->mac_filter_list_lock);
     }

-    i40e_sync_vsi_filters(vsi, false);
     ether_addr_copy(netdev->dev_addr, addr->sa_data);

+    /* schedule our worker thread which will take care of
+     * applying the new filter changes
+     */
+    i40e_service_event_schedule(vsi->back);
     return 0;
 }
@@ -1630,7 +1632,8 @@ static void i40e_vsi_setup_queue_map(struct i40e_vsi *vsi,
     switch (vsi->type) {
     case I40E_VSI_MAIN:
-        qcount = min_t(int, pf->rss_size, num_tc_qps);
+        qcount = min_t(int, pf->alloc_rss_size,
+                   num_tc_qps);
         break;
 #ifdef I40E_FCOE
     case I40E_VSI_FCOE:
@@ -1856,13 +1859,12 @@ static void i40e_cleanup_add_list(struct list_head *add_list)
 /**
  * i40e_sync_vsi_filters - Update the VSI filter list to the HW
  * @vsi: ptr to the VSI
- * @grab_rtnl: whether RTNL needs to be grabbed
  *
  * Push any outstanding VSI filter changes through the AdminQ.
  *
  * Returns 0 or error value
  **/
-int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
+int i40e_sync_vsi_filters(struct i40e_vsi *vsi)
 {
     struct list_head tmp_del_list, tmp_add_list;
     struct i40e_mac_filter *f, *ftmp, *fclone;
@@ -2117,12 +2119,7 @@ int i40e_sync_vsi_filters(struct i40e_vsi *vsi, bool grab_rtnl)
              */
             if (pf->cur_promisc != cur_promisc) {
                 pf->cur_promisc = cur_promisc;
-                if (grab_rtnl)
-                    i40e_do_reset_safe(pf,
-                        BIT(__I40E_PF_RESET_REQUESTED));
-                else
-                    i40e_do_reset(pf,
-                        BIT(__I40E_PF_RESET_REQUESTED));
+                set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
             }
         } else {
             ret = i40e_aq_set_vsi_unicast_promiscuous(
@@ -2171,8 +2168,15 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
     for (v = 0; v < pf->num_alloc_vsi; v++) {
         if (pf->vsi[v] &&
-            (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED))
-            i40e_sync_vsi_filters(pf->vsi[v], true);
+            (pf->vsi[v]->flags & I40E_VSI_FLAG_FILTER_CHANGED)) {
+            int ret = i40e_sync_vsi_filters(pf->vsi[v]);
+
+            if (ret) {
+                /* come back and try again later */
+                pf->flags |= I40E_FLAG_FILTER_SYNC;
+                break;
+            }
+        }
     }
 }
@@ -2382,16 +2386,13 @@ int i40e_vsi_add_vlan(struct i40e_vsi *vsi, s16 vid)
         }
     }

-    /* Make sure to release before sync_vsi_filter because that
-     * function will lock/unlock as necessary
-     */
     spin_unlock_bh(&vsi->mac_filter_list_lock);

-    if (test_bit(__I40E_DOWN, &vsi->back->state) ||
-        test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-        return 0;
-
-    return i40e_sync_vsi_filters(vsi, false);
+    /* schedule our worker thread which will take care of
+     * applying the new filter changes
+     */
+    i40e_service_event_schedule(vsi->back);
+    return 0;
 }
@@ -2464,16 +2465,13 @@ int i40e_vsi_kill_vlan(struct i40e_vsi *vsi, s16 vid)
         }
     }

-    /* Make sure to release before sync_vsi_filter because that
-     * function with lock/unlock as necessary
-     */
     spin_unlock_bh(&vsi->mac_filter_list_lock);

-    if (test_bit(__I40E_DOWN, &vsi->back->state) ||
-        test_bit(__I40E_RESET_RECOVERY_PENDING, &vsi->back->state))
-        return 0;
-
-    return i40e_sync_vsi_filters(vsi, false);
+    /* schedule our worker thread which will take care of
+     * applying the new filter changes
+     */
+    i40e_service_event_schedule(vsi->back);
+    return 0;
 }
@@ -2716,6 +2714,11 @@ static void i40e_config_xps_tx_ring(struct i40e_ring *ring)
         netif_set_xps_queue(ring->netdev, mask, ring->queue_index);
         free_cpumask_var(mask);
     }
+
+    /* schedule our worker thread which will take care of
+     * applying the new filter changes
+     */
+    i40e_service_event_schedule(vsi->back);
 }
@@ -7300,6 +7303,23 @@ static void i40e_vsi_free_arrays(struct i40e_vsi *vsi, bool free_qvectors)
     vsi->rx_rings = NULL;
 }

+/**
+ * i40e_clear_rss_config_user - clear the user configured RSS hash keys
+ * and lookup table
+ * @vsi: Pointer to VSI structure
+ */
+static void i40e_clear_rss_config_user(struct i40e_vsi *vsi)
+{
+    if (!vsi)
+        return;
+
+    kfree(vsi->rss_hkey_user);
+    vsi->rss_hkey_user = NULL;
+    kfree(vsi->rss_lut_user);
+    vsi->rss_lut_user = NULL;
+}
+
 /**
  * i40e_vsi_clear - Deallocate the VSI provided
  * @vsi: the VSI being un-configured
@@ -7337,6 +7357,7 @@ static int i40e_vsi_clear(struct i40e_vsi *vsi)
     i40e_put_lump(pf->irq_pile, vsi->base_vector, vsi->idx);
     i40e_vsi_free_arrays(vsi, true);
+    i40e_clear_rss_config_user(vsi);

     pf->vsi[vsi->idx] = NULL;
     if (vsi->idx < pf->next_vsi)
@@ -7865,7 +7886,7 @@ static int i40e_vsi_config_rss(struct i40e_vsi *vsi)
     i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
     netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
-    vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
+    vsi->rss_size = min_t(int, pf->alloc_rss_size, vsi->num_queue_pairs);
     ret = i40e_config_rss_aq(vsi, seed, lut, vsi->rss_table_size);
     kfree(lut);
@@ -8015,8 +8036,6 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
     wr32(hw, I40E_PFQF_HENA(0), (u32)hena);
     wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));

-    vsi->rss_size = min_t(int, pf->rss_size, vsi->num_queue_pairs);
-
     /* Determine the RSS table size based on the hardware capabilities */
     reg_val = rd32(hw, I40E_PFQF_CTL_0);
     reg_val = (pf->rss_table_size == 512) ?
@@ -8024,15 +8043,29 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
           (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
     wr32(hw, I40E_PFQF_CTL_0, reg_val);

+    /* Determine the RSS size of the VSI */
+    if (!vsi->rss_size)
+        vsi->rss_size = min_t(int, pf->alloc_rss_size,
+                      vsi->num_queue_pairs);
+
     lut = kzalloc(vsi->rss_table_size, GFP_KERNEL);
     if (!lut)
         return -ENOMEM;

-    i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);
+    /* Use user configured lut if there is one, otherwise use default */
+    if (vsi->rss_lut_user)
+        memcpy(lut, vsi->rss_lut_user, vsi->rss_table_size);
+    else
+        i40e_fill_rss_lut(pf, lut, vsi->rss_table_size, vsi->rss_size);

-    netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
+    /* Use user configured hash key if there is one, otherwise
+     * use default.
+     */
+    if (vsi->rss_hkey_user)
+        memcpy(seed, vsi->rss_hkey_user, I40E_HKEY_ARRAY_SIZE);
+    else
+        netdev_rss_key_fill((void *)seed, I40E_HKEY_ARRAY_SIZE);
     ret = i40e_config_rss(vsi, seed, lut, vsi->rss_table_size);
     kfree(lut);

     return ret;
@@ -8060,13 +8093,28 @@ int i40e_reconfig_rss_queues(struct i40e_pf *pf, int queue_count)
         vsi->req_queue_pairs = queue_count;
         i40e_prep_for_reset(pf);

-        pf->rss_size = new_rss_size;
+        pf->alloc_rss_size = new_rss_size;

         i40e_reset_and_rebuild(pf, true);
+
+        /* Discard the user configured hash keys and lut, if less
+         * queues are enabled.
+         */
+        if (queue_count < vsi->rss_size) {
+            i40e_clear_rss_config_user(vsi);
+            dev_dbg(&pf->pdev->dev,
+                "discard user configured hash keys and lut\n");
+        }
+
+        /* Reset vsi->rss_size, as number of enabled queues changed */
+        vsi->rss_size = min_t(int, pf->alloc_rss_size,
+                      vsi->num_queue_pairs);
+
         i40e_pf_config_rss(pf);
     }
-    dev_info(&pf->pdev->dev, "RSS count: %d\n", pf->rss_size);
-    return pf->rss_size;
+    dev_info(&pf->pdev->dev, "RSS count/HW max RSS count: %d/%d\n",
+         pf->alloc_rss_size, pf->rss_size_max);
+    return pf->alloc_rss_size;
 }
@@ -8237,13 +8285,14 @@ static int i40e_sw_init(struct i40e_pf *pf)
      * maximum might end up larger than the available queues
      */
     pf->rss_size_max = BIT(pf->hw.func_caps.rss_table_entry_width);
-    pf->rss_size = 1;
+    pf->alloc_rss_size = 1;
     pf->rss_table_size = pf->hw.func_caps.rss_table_size;
     pf->rss_size_max = min_t(int, pf->rss_size_max,
                  pf->hw.func_caps.num_tx_qp);
     if (pf->hw.func_caps.rss) {
         pf->flags |= I40E_FLAG_RSS_ENABLED;
-        pf->rss_size = min_t(int, pf->rss_size_max, num_online_cpus());
+        pf->alloc_rss_size = min_t(int, pf->rss_size_max,
+                       num_online_cpus());
     }

     /* MFP mode enabled */
@@ -9176,7 +9225,7 @@ int i40e_vsi_release(struct i40e_vsi *vsi)
                 f->is_vf, f->is_netdev);
     spin_unlock_bh(&vsi->mac_filter_list_lock);

-    i40e_sync_vsi_filters(vsi, false);
+    i40e_sync_vsi_filters(vsi);

     i40e_vsi_delete(vsi);
     i40e_vsi_free_q_vectors(vsi);
@@ -10110,7 +10159,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
         !(pf->flags & I40E_FLAG_MSIX_ENABLED)) {
         /* one qp for PF, no queues for anything else */
         queues_left = 0;
-        pf->rss_size = pf->num_lan_qps = 1;
+        pf->alloc_rss_size = pf->num_lan_qps = 1;

         /* make sure all the fancies are disabled */
         pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
@@ -10127,7 +10176,7 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
                   I40E_FLAG_FD_ATR_ENABLED |
                   I40E_FLAG_DCB_CAPABLE))) {
         /* one qp for PF */
-        pf->rss_size = pf->num_lan_qps = 1;
+        pf->alloc_rss_size = pf->num_lan_qps = 1;
         queues_left -= pf->num_lan_qps;

         pf->flags &= ~(I40E_FLAG_RSS_ENABLED |
@@ -10197,8 +10246,9 @@ static void i40e_determine_queue_usage(struct i40e_pf *pf)
         "qs_avail=%d FD SB=%d lan_qs=%d lan_tc0=%d vf=%d*%d vmdq=%d*%d, remaining=%d\n",
         pf->hw.func_caps.num_tx_qp,
         !!(pf->flags & I40E_FLAG_FD_SB_ENABLED),
-        pf->num_lan_qps, pf->rss_size, pf->num_req_vfs, pf->num_vf_qps,
-        pf->num_vmdq_vsis, pf->num_vmdq_qps, queues_left);
+        pf->num_lan_qps, pf->alloc_rss_size, pf->num_req_vfs,
+        pf->num_vf_qps, pf->num_vmdq_vsis, pf->num_vmdq_qps,
+        queues_left);
 #ifdef I40E_FCOE
     dev_dbg(&pf->pdev->dev, "fcoe queues = %d\n", pf->num_fcoe_qps);
 #endif
@@ -10424,6 +10474,16 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
     pf->hw.fc.requested_mode = I40E_FC_NONE;

     err = i40e_init_adminq(hw);
+    if (err) {
+        if (err == I40E_ERR_FIRMWARE_API_VERSION)
+            dev_info(&pdev->dev,
+                 "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
+        else
+            dev_info(&pdev->dev,
+                 "The driver for the device stopped because the device firmware failed to init. Try updating your NVM image.\n");
+        goto err_pf_reset;
+    }

     /* provide nvm, fw, api versions */
     dev_info(&pdev->dev, "fw %d.%d.%05d api %d.%d nvm %s\n",
@@ -10431,12 +10491,6 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
          hw->aq.api_maj_ver, hw->aq.api_min_ver,
          i40e_nvm_version_str(hw));

-    if (err) {
-        dev_info(&pdev->dev,
-             "The driver for the device stopped because the NVM image is newer than expected. You must install the most recent version of the network driver.\n");
-        goto err_pf_reset;
-    }
-
     if (hw->aq.api_maj_ver == I40E_FW_API_VERSION_MAJOR &&
         hw->aq.api_min_ver > I40E_FW_API_VERSION_MINOR)
         dev_info(&pdev->dev,
...
@@ -2806,6 +2806,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     int tsyn;
     int tso;

+    /* prefetch the data, we'll need it later */
+    prefetch(skb->data);
+
     if (0 == i40e_xmit_descriptor_count(skb, tx_ring))
         return NETDEV_TX_BUSY;
...
@@ -565,7 +565,7 @@ static int i40e_alloc_vsi_res(struct i40e_vf *vf, enum i40e_vsi_type type)
     }

     /* program mac filter */
-    ret = i40e_sync_vsi_filters(vsi, false);
+    ret = i40e_sync_vsi_filters(vsi);
     if (ret)
         dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
@@ -1094,8 +1094,8 @@ static int i40e_vc_send_msg_to_vf(struct i40e_vf *vf, u32 v_opcode,
     /* single place to detect unsuccessful return values */
     if (v_retval) {
         vf->num_invalid_msgs++;
-        dev_err(&pf->pdev->dev, "Failed opcode %d Error: %d\n",
-            v_opcode, v_retval);
+        dev_err(&pf->pdev->dev, "VF %d failed opcode %d, error: %d\n",
+            vf->vf_id, v_opcode, v_retval);
         if (vf->num_invalid_msgs >
             I40E_DEFAULT_NUM_INVALID_MSGS_ALLOWED) {
             dev_err(&pf->pdev->dev,
@@ -1633,7 +1633,7 @@ static int i40e_vc_add_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
     spin_unlock_bh(&vsi->mac_filter_list_lock);

     /* program the updated filter list */
-    if (i40e_sync_vsi_filters(vsi, false))
+    if (i40e_sync_vsi_filters(vsi))
         dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters\n",
             vf->vf_id);
@@ -1687,7 +1687,7 @@ static int i40e_vc_del_mac_addr_msg(struct i40e_vf *vf, u8 *msg, u16 msglen)
     spin_unlock_bh(&vsi->mac_filter_list_lock);

     /* program the updated filter list */
-    if (i40e_sync_vsi_filters(vsi, false))
+    if (i40e_sync_vsi_filters(vsi))
         dev_err(&pf->pdev->dev, "Unable to program VF %d MAC filters\n",
             vf->vf_id);
@@ -2102,7 +2102,7 @@ int i40e_ndo_set_vf_mac(struct net_device *netdev, int vf_id, u8 *mac)
     dev_info(&pf->pdev->dev, "Setting MAC %pM on VF %d\n", mac, vf_id);
     /* program mac filter */
-    if (i40e_sync_vsi_filters(vsi, false)) {
+    if (i40e_sync_vsi_filters(vsi)) {
         dev_err(&pf->pdev->dev, "Unable to program ucast filters\n");
         ret = -EIO;
         goto error_param;
...
@@ -245,16 +245,6 @@ static bool i40e_clean_tx_irq(struct i40e_ring *tx_ring, int budget)
     tx_ring->q_vector->tx.total_bytes += total_bytes;
     tx_ring->q_vector->tx.total_packets += total_packets;

-    /* check to see if there are any non-cache aligned descriptors
-     * waiting to be written back, and kick the hardware to force
-     * them to be written back in case of napi polling
-     */
-    if (budget &&
-        !((i & WB_STRIDE) == WB_STRIDE) &&
-        !test_bit(__I40E_DOWN, &tx_ring->vsi->state) &&
-        (I40E_DESC_UNUSED(tx_ring) != tx_ring->count))
-        tx_ring->arm_wb = true;
-
     netdev_tx_completed_queue(netdev_get_tx_queue(tx_ring->netdev,
                               tx_ring->queue_index),
                   total_packets, total_bytes);
@@ -1770,6 +1760,9 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
     u32 td_tag = 0;
     dma_addr_t dma;
     u16 gso_segs;
+    u16 desc_count = 0;
+    bool tail_bump = true;
+    bool do_rs = false;

     if (tx_flags & I40E_TX_FLAGS_HW_VLAN) {
         td_cmd |= I40E_TX_DESC_CMD_IL2TAG1;
@@ -1810,6 +1803,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
             tx_desc++;
             i++;
+            desc_count++;
+
             if (i == tx_ring->count) {
                 tx_desc = I40E_TX_DESC(tx_ring, 0);
                 i = 0;
@@ -1829,6 +1824,8 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
         tx_desc++;
         i++;
+        desc_count++;
+
         if (i == tx_ring->count) {
             tx_desc = I40E_TX_DESC(tx_ring, 0);
             i = 0;
@@ -1843,35 +1840,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
         tx_bi = &tx_ring->tx_bi[i];
     }

-    /* Place RS bit on last descriptor of any packet that spans across the
-     * 4th descriptor (WB_STRIDE aka 0x3) in a 64B cacheline.
-     */
 #define WB_STRIDE 0x3
-    if (((i & WB_STRIDE) != WB_STRIDE) &&
-        (first <= &tx_ring->tx_bi[i]) &&
-        (first >= &tx_ring->tx_bi[i & ~WB_STRIDE])) {
-        tx_desc->cmd_type_offset_bsz =
-            build_ctob(td_cmd, td_offset, size, td_tag) |
-            cpu_to_le64((u64)I40E_TX_DESC_CMD_EOP <<
-                     I40E_TXD_QW1_CMD_SHIFT);
-    } else {
-        tx_desc->cmd_type_offset_bsz =
-            build_ctob(td_cmd, td_offset, size, td_tag) |
-            cpu_to_le64((u64)I40E_TXD_CMD <<
-                     I40E_TXD_QW1_CMD_SHIFT);
-    }
-
-    netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
-                         tx_ring->queue_index),
-                 first->bytecount);
-
-    /* Force memory writes to complete before letting h/w
-     * know there are new descriptors to fetch.  (Only
-     * applicable for weak-ordered memory model archs,
-     * such as IA-64).
-     */
-    wmb();
-
     /* set next_to_watch value indicating a packet is present */
     first->next_to_watch = tx_desc;
@@ -1881,15 +1850,78 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
     tx_ring->next_to_use = i;

+    netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
+                         tx_ring->queue_index),
+                 first->bytecount);
     i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+    /* Algorithm to optimize tail and RS bit setting:
+     * if xmit_more is supported
+     *    if xmit_more is true
+     *        do not update tail and do not mark RS bit.
+     *    if xmit_more is false and last xmit_more was false
+     *        if every packet spanned less than 4 desc
+     *            then set RS bit on 4th packet and update tail
+     *            on every packet
+     *        else
+     *            update tail and set RS bit on every packet.
+     *    if xmit_more is false and last_xmit_more was true
+     *        update tail and set RS bit.
+     * else (kernel < 3.18)
+     *    if every packet spanned less than 4 desc
+     *        then set RS bit on 4th packet and update tail
+     *        on every packet
+     *    else
+     *        set RS bit on EOP for every packet and update tail
+     *
+     * Optimization: wmb to be issued only in case of tail update.
+     * Also optimize the Descriptor WB path for RS bit with the same
+     * algorithm.
+     *
+     * Note: If there are less than 4 packets
+     * pending and interrupts were disabled the service task will
+     * trigger a force WB.
+     */
+    if (skb->xmit_more &&
+        !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                            tx_ring->queue_index))) {
+        tx_ring->flags |= I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+        tail_bump = false;
+    } else if (!skb->xmit_more &&
+           !netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
+                            tx_ring->queue_index)) &&
+           (!(tx_ring->flags & I40E_TXR_FLAGS_LAST_XMIT_MORE_SET)) &&
+           (tx_ring->packet_stride < WB_STRIDE) &&
+           (desc_count < WB_STRIDE)) {
+        tx_ring->packet_stride++;
+    } else {
+        tx_ring->packet_stride = 0;
+        tx_ring->flags &= ~I40E_TXR_FLAGS_LAST_XMIT_MORE_SET;
+        do_rs = true;
+    }
+    if (do_rs)
+        tx_ring->packet_stride = 0;
+
+    tx_desc->cmd_type_offset_bsz =
+            build_ctob(td_cmd, td_offset, size, td_tag) |
+            cpu_to_le64((u64)(do_rs ? I40E_TXD_CMD :
+                          I40E_TX_DESC_CMD_EOP) <<
+                          I40E_TXD_QW1_CMD_SHIFT);
+
     /* notify HW of packet */
-    if (!skb->xmit_more ||
-        netif_xmit_stopped(netdev_get_tx_queue(tx_ring->netdev,
-                           tx_ring->queue_index)))
-        writel(i, tx_ring->tail);
-    else
+    if (!tail_bump)
         prefetchw(tx_desc + 1);

+    if (tail_bump) {
+        /* Force memory writes to complete before letting h/w
+         * know there are new descriptors to fetch.  (Only
+         * applicable for weak-ordered memory model archs,
+         * such as IA-64).
+         */
+        wmb();
+        writel(i, tx_ring->tail);
+    }
+
     return;

 dma_error:
@@ -1961,6 +1993,9 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
     u8 hdr_len = 0;
     int tso;

+    /* prefetch the data, we'll need it later */
+    prefetch(skb->data);
+
     if (0 == i40evf_xmit_descriptor_count(skb, tx_ring))
         return NETDEV_TX_BUSY;
@@ -2028,7 +2063,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
 netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
 {
     struct i40evf_adapter *adapter = netdev_priv(netdev);
-    struct i40e_ring *tx_ring = adapter->tx_rings[skb->queue_mapping];
+    struct i40e_ring *tx_ring = &adapter->tx_rings[skb->queue_mapping];

     /* hardware can't handle really short frames, hardware padding works
      * beyond this point
...
@@ -268,6 +268,8 @@ struct i40e_ring {
     bool ring_active;       /* is ring online or not */
     bool arm_wb;            /* do something to arm write back */
+    u8 packet_stride;
+#define I40E_TXR_FLAGS_LAST_XMIT_MORE_SET BIT(2)

     u16 flags;
 #define I40E_TXR_FLAGS_WB_ON_ITR    BIT(0)
...
@@ -67,6 +67,8 @@ struct i40e_vsi {
     u16 rx_itr_setting;
     u16 tx_itr_setting;
     u16 qs_handle;
+    u8 *rss_hkey_user;  /* User configured hash keys */
+    u8 *rss_lut_user;   /* User configured lookup table entries */
 };

 /* How many Rx Buffers do we bundle into one write to the hardware ? */
@@ -99,6 +101,7 @@ struct i40e_vsi {
 #define MAX_TX_QUEUES MAX_RX_QUEUES
 #define I40EVF_HKEY_ARRAY_SIZE ((I40E_VFQF_HKEY_MAX_INDEX + 1) * 4)
+#define I40EVF_HLUT_ARRAY_SIZE ((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4)

 /* MAX_MSIX_Q_VECTORS of these are allocated,
  * but we only use one per queue-specific vector.
@@ -142,9 +145,6 @@ struct i40e_q_vector {
 #define OTHER_VECTOR 1
 #define NONQ_VECS (OTHER_VECTOR)

-#define MAX_MSIX_Q_VECTORS 4
-#define MAX_MSIX_COUNT 5
-
 #define MIN_MSIX_Q_VECTORS 1
 #define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NONQ_VECS)

@@ -190,19 +190,19 @@ struct i40evf_adapter {
     struct work_struct reset_task;
     struct work_struct adminq_task;
     struct delayed_work init_task;
-    struct i40e_q_vector *q_vector[MAX_MSIX_Q_VECTORS];
+    struct i40e_q_vector *q_vectors;
     struct list_head vlan_filter_list;
     char misc_vector_name[IFNAMSIZ + 9];
     int num_active_queues;

     /* TX */
-    struct i40e_ring *tx_rings[I40E_MAX_VSI_QP];
+    struct i40e_ring *tx_rings;
     u32 tx_timeout_count;
     struct list_head mac_filter_list;
     u32 tx_desc_count;

     /* RX */
-    struct i40e_ring *rx_rings[I40E_MAX_VSI_QP];
+    struct i40e_ring *rx_rings;
     u64 hw_csum_rx_error;
     u32 rx_desc_count;
     int num_msix_vectors;
@@ -313,4 +313,8 @@ void i40evf_request_reset(struct i40evf_adapter *adapter);
 void i40evf_virtchnl_completion(struct i40evf_adapter *adapter,
                 enum i40e_virtchnl_ops v_opcode,
                 i40e_status v_retval, u8 *msg, u16 msglen);
+int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
+              u16 lut_size);
+int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut,
+           u16 lut_size);
 #endif /* _I40EVF_H_ */
@@ -121,12 +121,12 @@ static void i40evf_get_ethtool_stats(struct net_device *netdev,
         data[i] = *(u64 *)p;
     }
     for (j = 0; j < adapter->num_active_queues; j++) {
-        data[i++] = adapter->tx_rings[j]->stats.packets;
-        data[i++] = adapter->tx_rings[j]->stats.bytes;
+        data[i++] = adapter->tx_rings[j].stats.packets;
+        data[i++] = adapter->tx_rings[j].stats.bytes;
     }
     for (j = 0; j < adapter->num_active_queues; j++) {
-        data[i++] = adapter->rx_rings[j]->stats.packets;
-        data[i++] = adapter->rx_rings[j]->stats.bytes;
+        data[i++] = adapter->rx_rings[j].stats.packets;
+        data[i++] = adapter->rx_rings[j].stats.bytes;
     }
 }
@@ -351,7 +351,7 @@ static int i40evf_set_coalesce(struct net_device *netdev,
     vsi->tx_itr_setting &= ~I40E_ITR_DYNAMIC;

     for (i = 0; i < adapter->num_msix_vectors - NONQ_VECS; i++) {
-        q_vector = adapter->q_vector[i];
+        q_vector = &adapter->q_vectors[i];
         q_vector->rx.itr = ITR_TO_REG(vsi->rx_itr_setting);
         wr32(hw, I40E_VFINT_ITRN1(0, i), q_vector->rx.itr);
         q_vector->tx.itr = ITR_TO_REG(vsi->tx_itr_setting);
@@ -634,25 +634,34 @@ static int i40evf_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key,
                u8 *hfunc)
 {
     struct i40evf_adapter *adapter = netdev_priv(netdev);
-    struct i40e_hw *hw = &adapter->hw;
-    u32 hlut_val;
-    int i, j;
+    struct i40e_vsi *vsi = &adapter->vsi;
+    u8 *seed = NULL, *lut;
+    int ret;
+    u16 i;

     if (hfunc)
         *hfunc = ETH_RSS_HASH_TOP;
     if (!indir)
         return 0;

-    if (indir) {
-        for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-            hlut_val = rd32(hw, I40E_VFQF_HLUT(i));
-            indir[j++] = hlut_val & 0xff;
-            indir[j++] = (hlut_val >> 8) & 0xff;
-            indir[j++] = (hlut_val >> 16) & 0xff;
-            indir[j++] = (hlut_val >> 24) & 0xff;
-        }
-    }
-    return 0;
+    seed = key;
+
+    lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
+    if (!lut)
+        return -ENOMEM;
+
+    ret = i40evf_get_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
+    if (ret)
+        goto out;
+
+    /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+    for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
+        indir[i] = (u32)lut[i];
+
+out:
+    kfree(lut);
+    return ret;
 }

 /**
@@ -668,9 +677,9 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
                const u8 *key, const u8 hfunc)
 {
     struct i40evf_adapter *adapter = netdev_priv(netdev);
-    struct i40e_hw *hw = &adapter->hw;
-    u32 hlut_val;
-    int i, j;
+    struct i40e_vsi *vsi = &adapter->vsi;
+    u8 *seed = NULL;
+    u16 i;

     /* We do not allow change in unsupported parameters */
     if (key ||
@@ -679,15 +688,29 @@ static int i40evf_set_rxfh(struct net_device *netdev, const u32 *indir,
     if (!indir)
         return 0;

-    for (i = 0, j = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-        hlut_val = indir[j++];
-        hlut_val |= indir[j++] << 8;
-        hlut_val |= indir[j++] << 16;
-        hlut_val |= indir[j++] << 24;
-        wr32(hw, I40E_VFQF_HLUT(i), hlut_val);
+    if (key) {
+        if (!vsi->rss_hkey_user) {
+            vsi->rss_hkey_user = kzalloc(I40EVF_HKEY_ARRAY_SIZE,
+                             GFP_KERNEL);
+            if (!vsi->rss_hkey_user)
+                return -ENOMEM;
+        }
+        memcpy(vsi->rss_hkey_user, key, I40EVF_HKEY_ARRAY_SIZE);
+        seed = vsi->rss_hkey_user;
+    }
+    if (!vsi->rss_lut_user) {
+        vsi->rss_lut_user = kzalloc(I40EVF_HLUT_ARRAY_SIZE,
+                        GFP_KERNEL);
+        if (!vsi->rss_lut_user)
+            return -ENOMEM;
     }

-    return 0;
+    /* Each 32 bits pointed by 'indir' is stored with a lut entry */
+    for (i = 0; i < I40EVF_HLUT_ARRAY_SIZE; i++)
+        vsi->rss_lut_user[i] = (u8)(indir[i]);
+
+    return i40evf_config_rss(vsi, seed, vsi->rss_lut_user,
+                 I40EVF_HLUT_ARRAY_SIZE);
 }

 static const struct ethtool_ops i40evf_ethtool_ops = {
...
...@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf"; ...@@ -34,7 +34,7 @@ char i40evf_driver_name[] = "i40evf";
static const char i40evf_driver_string[] = static const char i40evf_driver_string[] =
"Intel(R) XL710/X710 Virtual Function Network Driver"; "Intel(R) XL710/X710 Virtual Function Network Driver";
#define DRV_VERSION "1.3.33" #define DRV_VERSION "1.4.1"
const char i40evf_driver_version[] = DRV_VERSION; const char i40evf_driver_version[] = DRV_VERSION;
static const char i40evf_copyright[] = static const char i40evf_copyright[] =
"Copyright (c) 2013 - 2015 Intel Corporation."; "Copyright (c) 2013 - 2015 Intel Corporation.";
...@@ -347,8 +347,8 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data) ...@@ -347,8 +347,8 @@ static irqreturn_t i40evf_msix_clean_rings(int irq, void *data)
static void static void
i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
{ {
struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *rx_ring = adapter->rx_rings[r_idx]; struct i40e_ring *rx_ring = &adapter->rx_rings[r_idx];
rx_ring->q_vector = q_vector; rx_ring->q_vector = q_vector;
rx_ring->next = q_vector->rx.ring; rx_ring->next = q_vector->rx.ring;
...@@ -368,8 +368,8 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx) ...@@ -368,8 +368,8 @@ i40evf_map_vector_to_rxq(struct i40evf_adapter *adapter, int v_idx, int r_idx)
static void static void
i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx) i40evf_map_vector_to_txq(struct i40evf_adapter *adapter, int v_idx, int t_idx)
{ {
struct i40e_q_vector *q_vector = adapter->q_vector[v_idx]; struct i40e_q_vector *q_vector = &adapter->q_vectors[v_idx];
struct i40e_ring *tx_ring = adapter->tx_rings[t_idx]; struct i40e_ring *tx_ring = &adapter->tx_rings[t_idx];
tx_ring->q_vector = q_vector; tx_ring->q_vector = q_vector;
tx_ring->next = q_vector->tx.ring; tx_ring->next = q_vector->tx.ring;
...@@ -464,7 +464,7 @@ static void i40evf_netpoll(struct net_device *netdev) ...@@ -464,7 +464,7 @@ static void i40evf_netpoll(struct net_device *netdev)
return; return;
for (i = 0; i < q_vectors; i++) for (i = 0; i < q_vectors; i++)
i40evf_msix_clean_rings(0, adapter->q_vector[i]); i40evf_msix_clean_rings(0, &adapter->q_vectors[i]);
} }
#endif #endif
...@@ -486,7 +486,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) ...@@ -486,7 +486,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
q_vectors = adapter->num_msix_vectors - NONQ_VECS; q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (vector = 0; vector < q_vectors; vector++) { for (vector = 0; vector < q_vectors; vector++) {
struct i40e_q_vector *q_vector = adapter->q_vector[vector]; struct i40e_q_vector *q_vector = &adapter->q_vectors[vector];
if (q_vector->tx.ring && q_vector->rx.ring) { if (q_vector->tx.ring && q_vector->rx.ring) {
snprintf(q_vector->name, sizeof(q_vector->name) - 1, snprintf(q_vector->name, sizeof(q_vector->name) - 1,
...@@ -531,7 +531,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename) ...@@ -531,7 +531,7 @@ i40evf_request_traffic_irqs(struct i40evf_adapter *adapter, char *basename)
adapter->msix_entries[vector + NONQ_VECS].vector, adapter->msix_entries[vector + NONQ_VECS].vector,
NULL); NULL);
free_irq(adapter->msix_entries[vector + NONQ_VECS].vector, free_irq(adapter->msix_entries[vector + NONQ_VECS].vector,
adapter->q_vector[vector]); &adapter->q_vectors[vector]);
} }
return err; return err;
} }
...@@ -581,7 +581,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter) ...@@ -581,7 +581,7 @@ static void i40evf_free_traffic_irqs(struct i40evf_adapter *adapter)
irq_set_affinity_hint(adapter->msix_entries[i+1].vector, irq_set_affinity_hint(adapter->msix_entries[i+1].vector,
NULL); NULL);
free_irq(adapter->msix_entries[i+1].vector, free_irq(adapter->msix_entries[i+1].vector,
adapter->q_vector[i]); &adapter->q_vectors[i]);
} }
} }
...@@ -610,7 +610,7 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter) ...@@ -610,7 +610,7 @@ static void i40evf_configure_tx(struct i40evf_adapter *adapter)
int i; int i;
for (i = 0; i < adapter->num_active_queues; i++) for (i = 0; i < adapter->num_active_queues; i++)
adapter->tx_rings[i]->tail = hw->hw_addr + I40E_QTX_TAIL1(i); adapter->tx_rings[i].tail = hw->hw_addr + I40E_QTX_TAIL1(i);
} }
/** /**
...@@ -655,8 +655,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter) ...@@ -655,8 +655,8 @@ static void i40evf_configure_rx(struct i40evf_adapter *adapter)
} }
for (i = 0; i < adapter->num_active_queues; i++) { for (i = 0; i < adapter->num_active_queues; i++) {
adapter->rx_rings[i]->tail = hw->hw_addr + I40E_QRX_TAIL1(i); adapter->rx_rings[i].tail = hw->hw_addr + I40E_QRX_TAIL1(i);
adapter->rx_rings[i]->rx_buf_len = rx_buf_len; adapter->rx_rings[i].rx_buf_len = rx_buf_len;
} }
} }
...@@ -953,7 +953,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter) ...@@ -953,7 +953,7 @@ static void i40evf_napi_enable_all(struct i40evf_adapter *adapter)
for (q_idx = 0; q_idx < q_vectors; q_idx++) { for (q_idx = 0; q_idx < q_vectors; q_idx++) {
struct napi_struct *napi; struct napi_struct *napi;
q_vector = adapter->q_vector[q_idx]; q_vector = &adapter->q_vectors[q_idx];
napi = &q_vector->napi; napi = &q_vector->napi;
napi_enable(napi); napi_enable(napi);
} }
...@@ -970,7 +970,7 @@ static void i40evf_napi_disable_all(struct i40evf_adapter *adapter) ...@@ -970,7 +970,7 @@ static void i40evf_napi_disable_all(struct i40evf_adapter *adapter)
int q_vectors = adapter->num_msix_vectors - NONQ_VECS; int q_vectors = adapter->num_msix_vectors - NONQ_VECS;
for (q_idx = 0; q_idx < q_vectors; q_idx++) { for (q_idx = 0; q_idx < q_vectors; q_idx++) {
q_vector = adapter->q_vector[q_idx]; q_vector = &adapter->q_vectors[q_idx];
napi_disable(&q_vector->napi); napi_disable(&q_vector->napi);
} }
} }
...@@ -991,7 +991,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter) ...@@ -991,7 +991,7 @@ static void i40evf_configure(struct i40evf_adapter *adapter)
adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES; adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_QUEUES;
for (i = 0; i < adapter->num_active_queues; i++) { for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *ring = adapter->rx_rings[i]; struct i40e_ring *ring = &adapter->rx_rings[i];
i40evf_alloc_rx_buffers_1buf(ring, ring->count); i40evf_alloc_rx_buffers_1buf(ring, ring->count);
ring->next_to_use = ring->count - 1; ring->next_to_use = ring->count - 1;
...@@ -1111,16 +1111,10 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors) ...@@ -1111,16 +1111,10 @@ i40evf_acquire_msix_vectors(struct i40evf_adapter *adapter, int vectors)
**/ **/
static void i40evf_free_queues(struct i40evf_adapter *adapter) static void i40evf_free_queues(struct i40evf_adapter *adapter)
{ {
int i;
if (!adapter->vsi_res) if (!adapter->vsi_res)
return; return;
for (i = 0; i < adapter->num_active_queues; i++) { kfree(adapter->tx_rings);
if (adapter->tx_rings[i]) kfree(adapter->rx_rings);
kfree_rcu(adapter->tx_rings[i], rcu);
adapter->tx_rings[i] = NULL;
adapter->rx_rings[i] = NULL;
}
} }
/** /**
...@@ -1135,13 +1129,20 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) ...@@ -1135,13 +1129,20 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
{ {
int i; int i;
adapter->tx_rings = kcalloc(adapter->num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->tx_rings)
goto err_out;
adapter->rx_rings = kcalloc(adapter->num_active_queues,
sizeof(struct i40e_ring), GFP_KERNEL);
if (!adapter->rx_rings)
goto err_out;
for (i = 0; i < adapter->num_active_queues; i++) { for (i = 0; i < adapter->num_active_queues; i++) {
struct i40e_ring *tx_ring; struct i40e_ring *tx_ring;
struct i40e_ring *rx_ring; struct i40e_ring *rx_ring;
tx_ring = kzalloc(sizeof(*tx_ring) * 2, GFP_KERNEL); tx_ring = &adapter->tx_rings[i];
if (!tx_ring)
goto err_out;
tx_ring->queue_index = i; tx_ring->queue_index = i;
tx_ring->netdev = adapter->netdev; tx_ring->netdev = adapter->netdev;
...@@ -1149,14 +1150,12 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter) ...@@ -1149,14 +1150,12 @@ static int i40evf_alloc_queues(struct i40evf_adapter *adapter)
tx_ring->count = adapter->tx_desc_count; tx_ring->count = adapter->tx_desc_count;
if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE) if (adapter->flags & I40E_FLAG_WB_ON_ITR_CAPABLE)
tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR; tx_ring->flags |= I40E_TXR_FLAGS_WB_ON_ITR;
adapter->tx_rings[i] = tx_ring;
rx_ring = &tx_ring[1]; rx_ring = &adapter->rx_rings[i];
rx_ring->queue_index = i; rx_ring->queue_index = i;
rx_ring->netdev = adapter->netdev; rx_ring->netdev = adapter->netdev;
rx_ring->dev = &adapter->pdev->dev; rx_ring->dev = &adapter->pdev->dev;
rx_ring->count = adapter->rx_desc_count; rx_ring->count = adapter->rx_desc_count;
adapter->rx_rings[i] = rx_ring;
} }
return 0; return 0;
...@@ -1211,110 +1210,267 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter) ...@@ -1211,110 +1210,267 @@ static int i40evf_set_interrupt_capability(struct i40evf_adapter *adapter)
} }
/** /**
* i40e_configure_rss_aq - Prepare for RSS using AQ commands * i40e_config_rss_aq - Prepare for RSS using AQ commands
* @vsi: vsi structure * @vsi: vsi structure
* @seed: RSS hash seed * @seed: RSS hash seed
* @lut: Lookup table
* @lut_size: Lookup table size
*
* Return 0 on success, negative on failure
**/ **/
static void i40evf_configure_rss_aq(struct i40e_vsi *vsi, const u8 *seed) static int i40evf_config_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
u8 *lut, u16 lut_size)
{ {
struct i40e_aqc_get_set_rss_key_data rss_key;
struct i40evf_adapter *adapter = vsi->back; struct i40evf_adapter *adapter = vsi->back;
struct i40e_hw *hw = &adapter->hw; struct i40e_hw *hw = &adapter->hw;
int ret = 0, i; int ret = 0;
u8 *rss_lut;
if (!vsi->id) if (!vsi->id)
return; return -EINVAL;
if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) { if (adapter->current_op != I40E_VIRTCHNL_OP_UNKNOWN) {
/* bail because we already have a command pending */ /* bail because we already have a command pending */
dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n", dev_err(&adapter->pdev->dev, "Cannot confiure RSS, command %d pending\n",
adapter->current_op); adapter->current_op);
return; return -EBUSY;
} }
memset(&rss_key, 0, sizeof(rss_key)); if (seed) {
memcpy(&rss_key, seed, sizeof(rss_key)); struct i40e_aqc_get_set_rss_key_data *rss_key =
(struct i40e_aqc_get_set_rss_key_data *)seed;
ret = i40evf_aq_set_rss_key(hw, vsi->id, rss_key);
if (ret) {
dev_err(&adapter->pdev->dev, "Cannot set RSS key, err %s aq_err %s\n",
i40evf_stat_str(hw, ret),
i40evf_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
}
rss_lut = kzalloc(((I40E_VFQF_HLUT_MAX_INDEX + 1) * 4), GFP_KERNEL); if (lut) {
if (!rss_lut) ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, lut, lut_size);
return; if (ret) {
dev_err(&adapter->pdev->dev,
"Cannot set RSS lut, err %s aq_err %s\n",
i40evf_stat_str(hw, ret),
i40evf_aq_str(hw, hw->aq.asq_last_status));
return ret;
}
}
/* Populate the LUT with max no. PF queues in round robin fashion */ return ret;
for (i = 0; i <= (I40E_VFQF_HLUT_MAX_INDEX * 4); i++) }
rss_lut[i] = i % adapter->num_active_queues;
ret = i40evf_aq_set_rss_key(hw, vsi->id, &rss_key); /**
if (ret) { * i40evf_config_rss_reg - Configure RSS keys and lut by writing registers
dev_err(&adapter->pdev->dev, * @vsi: Pointer to vsi structure
"Cannot set RSS key, err %s aq_err %s\n", * @seed: RSS hash seed
i40evf_stat_str(hw, ret), * @lut: Lookup table
i40evf_aq_str(hw, hw->aq.asq_last_status)); * @lut_size: Lookup table size
return; *
* Returns 0 on success, negative on failure
**/
static int i40evf_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
const u8 *lut, u16 lut_size)
{
struct i40evf_adapter *adapter = vsi->back;
struct i40e_hw *hw = &adapter->hw;
u16 i;
if (seed) {
u32 *seed_dw = (u32 *)seed;
for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
} }
ret = i40evf_aq_set_rss_lut(hw, vsi->id, false, rss_lut, if (lut) {
(I40E_VFQF_HLUT_MAX_INDEX + 1) * 4); u32 *lut_dw = (u32 *)lut;
if (ret)
dev_err(&adapter->pdev->dev, if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
"Cannot set RSS lut, err %s aq_err %s\n", return -EINVAL;
i40evf_stat_str(hw, ret),
i40evf_aq_str(hw, hw->aq.asq_last_status)); for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
wr32(hw, I40E_VFQF_HLUT(i), lut_dw[i]);
}
i40e_flush(hw);
return 0;
} }
-/**
- * i40e_configure_rss_reg - Prepare for RSS if used
- * @adapter: board private structure
- * @seed: RSS hash seed
- **/
-static void i40evf_configure_rss_reg(struct i40evf_adapter *adapter,
-				     const u8 *seed)
-{
-	struct i40e_hw *hw = &adapter->hw;
-	u32 *seed_dw = (u32 *)seed;
-	u32 cqueue = 0;
-	u32 lut = 0;
-	int i, j;
-
-	/* Fill out hash function seed */
-	for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
-		wr32(hw, I40E_VFQF_HKEY(i), seed_dw[i]);
-
-	/* Populate the LUT with max no. PF queues in round robin fashion */
-	for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++) {
-		lut = 0;
-		for (j = 0; j < 4; j++) {
-			if (cqueue == adapter->num_active_queues)
-				cqueue = 0;
-			lut |= ((cqueue) << (8 * j));
-			cqueue++;
-		}
-		wr32(hw, I40E_VFQF_HLUT(i), lut);
-	}
-	i40e_flush(hw);
-}
+/**
+ * i40evf_get_rss_aq - Get RSS keys and lut by using AQ commands
+ * @vsi: Pointer to vsi structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int i40evf_get_rss_aq(struct i40e_vsi *vsi, const u8 *seed,
+			     u8 *lut, u16 lut_size)
+{
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_hw *hw = &adapter->hw;
+	int ret = 0;
+
+	if (seed) {
+		ret = i40evf_aq_get_rss_key(hw, vsi->id,
+			(struct i40e_aqc_get_set_rss_key_data *)seed);
+		if (ret) {
+			dev_err(&adapter->pdev->dev,
+				"Cannot get RSS key, err %s aq_err %s\n",
+				i40evf_stat_str(hw, ret),
+				i40evf_aq_str(hw, hw->aq.asq_last_status));
+			return ret;
+		}
+	}
+
+	if (lut) {
+		ret = i40evf_aq_get_rss_lut(hw, vsi->id, seed, lut, lut_size);
+		if (ret) {
+			dev_err(&adapter->pdev->dev,
+				"Cannot get RSS lut, err %s aq_err %s\n",
+				i40evf_stat_str(hw, ret),
+				i40evf_aq_str(hw, hw->aq.asq_last_status));
+			return ret;
+		}
+	}
+
+	return ret;
+}
+
+/**
+ * i40evf_get_rss_reg - Get RSS keys and lut by reading registers
+ * @vsi: Pointer to vsi structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int i40evf_get_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
+			      const u8 *lut, u16 lut_size)
+{
+	struct i40evf_adapter *adapter = vsi->back;
+	struct i40e_hw *hw = &adapter->hw;
+	u16 i;
+
+	if (seed) {
+		u32 *seed_dw = (u32 *)seed;
+
+		for (i = 0; i <= I40E_VFQF_HKEY_MAX_INDEX; i++)
+			seed_dw[i] = rd32(hw, I40E_VFQF_HKEY(i));
+	}
+
+	if (lut) {
+		u32 *lut_dw = (u32 *)lut;
+
+		if (lut_size != I40EVF_HLUT_ARRAY_SIZE)
+			return -EINVAL;
+
+		for (i = 0; i <= I40E_VFQF_HLUT_MAX_INDEX; i++)
+			lut_dw[i] = rd32(hw, I40E_VFQF_HLUT(i));
+	}
+
+	return 0;
+}
+
+/**
+ * i40evf_config_rss - Configure RSS keys and lut
+ * @vsi: Pointer to vsi structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_config_rss(struct i40e_vsi *vsi, const u8 *seed,
+		      u8 *lut, u16 lut_size)
+{
+	struct i40evf_adapter *adapter = vsi->back;
+
+	if (RSS_AQ(adapter))
+		return i40evf_config_rss_aq(vsi, seed, lut, lut_size);
+	else
+		return i40evf_config_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40evf_get_rss - Get RSS keys and lut
+ * @vsi: Pointer to vsi structure
+ * @seed: RSS hash seed
+ * @lut: Lookup table
+ * @lut_size: Lookup table size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int i40evf_get_rss(struct i40e_vsi *vsi, const u8 *seed, u8 *lut, u16 lut_size)
+{
+	struct i40evf_adapter *adapter = vsi->back;
+
+	if (RSS_AQ(adapter))
+		return i40evf_get_rss_aq(vsi, seed, lut, lut_size);
+	else
+		return i40evf_get_rss_reg(vsi, seed, lut, lut_size);
+}
+
+/**
+ * i40evf_fill_rss_lut - Fill the lut with default values
+ * @lut: Lookup table to be filled with
+ * @rss_table_size: Lookup table size
+ * @rss_size: Range of queue number for hashing
+ **/
+static void i40evf_fill_rss_lut(u8 *lut, u16 rss_table_size, u16 rss_size)
+{
+	u16 i;
+
+	for (i = 0; i < rss_table_size; i++)
+		lut[i] = i % rss_size;
+}
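The new i40evf_fill_rss_lut() writes one queue index per LUT byte, while the removed register-path code built the same mapping four entries at a time, packing queue indices into each 32-bit HLUT register with lut |= cqueue << (8 * j). A small stand-alone sketch (plain user-space C, with an assumed 16-register, 4-entries-per-register LUT) checks that the two formulations produce the same byte image on a little-endian host:

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        #define LUT_REGS 16              /* assumed; 4 LUT entries per register */
        #define LUT_BYTES (LUT_REGS * 4)

        int main(void)
        {
                uint8_t lut[LUT_BYTES];
                uint32_t packed[LUT_REGS];
                uint8_t unpacked[LUT_BYTES];
                unsigned int num_queues = 6; /* e.g. adapter->num_active_queues */
                unsigned int i, j, cqueue = 0;

                /* New style: one byte per LUT entry, round robin over queues. */
                for (i = 0; i < LUT_BYTES; i++)
                        lut[i] = i % num_queues;

                /* Old style: pack four entries into each 32-bit register. */
                for (i = 0; i < LUT_REGS; i++) {
                        uint32_t val = 0;

                        for (j = 0; j < 4; j++) {
                                if (cqueue == num_queues)
                                        cqueue = 0;
                                val |= cqueue << (8 * j);
                                cqueue++;
                        }
                        packed[i] = val;
                }

                /* On a little-endian host the register image matches the LUT. */
                memcpy(unpacked, packed, sizeof(unpacked));
                assert(memcmp(lut, unpacked, LUT_BYTES) == 0);
                printf("round-robin LUT and packed registers agree\n");
                return 0;
        }

Both loops assign entry k the value k modulo the queue count; the packed form merely stores four consecutive entries per register, one per byte lane.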
 /**
- * i40evf_configure_rss - Prepare for RSS
+ * i40evf_init_rss - Prepare for RSS
  * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
  **/
-static void i40evf_configure_rss(struct i40evf_adapter *adapter)
+static int i40evf_init_rss(struct i40evf_adapter *adapter)
 {
+	struct i40e_vsi *vsi = &adapter->vsi;
 	struct i40e_hw *hw = &adapter->hw;
 	u8 seed[I40EVF_HKEY_ARRAY_SIZE];
 	u64 hena;
-
-	netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+	u8 *lut;
+	int ret;

 	/* Enable PCTYPES for RSS, TCP/UDP with IPv4/IPv6 */
 	hena = I40E_DEFAULT_RSS_HENA;
 	wr32(hw, I40E_VFQF_HENA(0), (u32)hena);
 	wr32(hw, I40E_VFQF_HENA(1), (u32)(hena >> 32));

-	if (RSS_AQ(adapter))
-		i40evf_configure_rss_aq(&adapter->vsi, seed);
+	lut = kzalloc(I40EVF_HLUT_ARRAY_SIZE, GFP_KERNEL);
+	if (!lut)
+		return -ENOMEM;
+
+	/* Use user configured lut if there is one, otherwise use default */
+	if (vsi->rss_lut_user)
+		memcpy(lut, vsi->rss_lut_user, I40EVF_HLUT_ARRAY_SIZE);
 	else
-		i40evf_configure_rss_reg(adapter, seed);
+		i40evf_fill_rss_lut(lut, I40EVF_HLUT_ARRAY_SIZE,
+				    adapter->num_active_queues);
+
+	/* Use user configured hash key if there is one, otherwise
+	 * use default.
+	 */
+	if (vsi->rss_hkey_user)
+		memcpy(seed, vsi->rss_hkey_user, I40EVF_HKEY_ARRAY_SIZE);
+	else
+		netdev_rss_key_fill((void *)seed, I40EVF_HKEY_ARRAY_SIZE);
+	ret = i40evf_config_rss(vsi, seed, lut, I40EVF_HLUT_ARRAY_SIZE);
+	kfree(lut);
+
+	return ret;
 }
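One detail of i40evf_init_rss() worth calling out: the 64-bit hash-enable mask is programmed through two 32-bit VFQF_HENA registers, low word first. A trivial stand-alone model of that split and its reassembly (the mask value and register store are placeholders, not the real I40E_DEFAULT_RSS_HENA or wr32()):

        #include <assert.h>
        #include <stdint.h>
        #include <stdio.h>

        static uint32_t vfqf_hena[2]; /* stand-ins for I40E_VFQF_HENA(0..1) */

        static void wr32(unsigned int reg, uint32_t val)
        {
                vfqf_hena[reg] = val;
        }

        int main(void)
        {
                /* Arbitrary placeholder bit mask, one bit per packet type. */
                uint64_t hena = 0x00000000514f0000ULL;

                wr32(0, (uint32_t)hena);         /* low 32 bits */
                wr32(1, (uint32_t)(hena >> 32)); /* high 32 bits */

                assert(((uint64_t)vfqf_hena[1] << 32 | vfqf_hena[0]) == hena);
                printf("hena split/reassembled: 0x%016llx\n",
                       (unsigned long long)hena);
                return 0;
        }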
/** /**
...@@ -1326,21 +1482,22 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter) ...@@ -1326,21 +1482,22 @@ static void i40evf_configure_rss(struct i40evf_adapter *adapter)
**/ **/
static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
{ {
int q_idx, num_q_vectors; int q_idx = 0, num_q_vectors;
struct i40e_q_vector *q_vector; struct i40e_q_vector *q_vector;
num_q_vectors = adapter->num_msix_vectors - NONQ_VECS; num_q_vectors = adapter->num_msix_vectors - NONQ_VECS;
adapter->q_vectors = kcalloc(num_q_vectors, sizeof(*q_vector),
GFP_KERNEL);
if (!adapter->q_vectors)
goto err_out;
for (q_idx = 0; q_idx < num_q_vectors; q_idx++) { for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
q_vector = kzalloc(sizeof(*q_vector), GFP_KERNEL); q_vector = &adapter->q_vectors[q_idx];
if (!q_vector)
goto err_out;
q_vector->adapter = adapter; q_vector->adapter = adapter;
q_vector->vsi = &adapter->vsi; q_vector->vsi = &adapter->vsi;
q_vector->v_idx = q_idx; q_vector->v_idx = q_idx;
netif_napi_add(adapter->netdev, &q_vector->napi, netif_napi_add(adapter->netdev, &q_vector->napi,
i40evf_napi_poll, NAPI_POLL_WEIGHT); i40evf_napi_poll, NAPI_POLL_WEIGHT);
adapter->q_vector[q_idx] = q_vector;
} }
return 0; return 0;
...@@ -1348,11 +1505,10 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter) ...@@ -1348,11 +1505,10 @@ static int i40evf_alloc_q_vectors(struct i40evf_adapter *adapter)
err_out: err_out:
while (q_idx) { while (q_idx) {
q_idx--; q_idx--;
q_vector = adapter->q_vector[q_idx]; q_vector = &adapter->q_vectors[q_idx];
netif_napi_del(&q_vector->napi); netif_napi_del(&q_vector->napi);
kfree(q_vector);
adapter->q_vector[q_idx] = NULL;
} }
kfree(adapter->q_vectors);
return -ENOMEM; return -ENOMEM;
} }
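The structural change in this hunk is the one described in the cover letter: a single kcalloc() of a contiguous q_vector array replaces per-vector kzalloc(), so the error path only unwinds the netif_napi_add() registrations and then frees the whole array once. A user-space sketch of the same allocate-then-unwind shape, with calloc() standing in for kcalloc() and fake register/unregister hooks standing in for NAPI:

        #include <stdio.h>
        #include <stdlib.h>

        struct q_vector {
                int v_idx;
                int registered; /* models netif_napi_add() state */
        };

        static int register_vector(struct q_vector *q)
        {
                q->registered = 1; /* pretend this could also fail */
                return 0;
        }

        static void unregister_vector(struct q_vector *q)
        {
                q->registered = 0; /* models netif_napi_del() */
        }

        static struct q_vector *alloc_q_vectors(int num)
        {
                struct q_vector *vecs;
                int q_idx = 0;

                vecs = calloc(num, sizeof(*vecs)); /* kcalloc() analogue */
                if (!vecs)
                        goto err_out;

                for (q_idx = 0; q_idx < num; q_idx++) {
                        vecs[q_idx].v_idx = q_idx;
                        if (register_vector(&vecs[q_idx]))
                                goto err_out;
                }
                return vecs;

        err_out:
                while (q_idx) {        /* unwind only what was registered */
                        q_idx--;
                        unregister_vector(&vecs[q_idx]);
                }
                free(vecs);            /* one free for the whole array */
                return NULL;
        }

        int main(void)
        {
                struct q_vector *vecs = alloc_q_vectors(4);

                printf("alloc %s\n", vecs ? "succeeded" : "failed");
                free(vecs);
                return 0;
        }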
@@ -1373,13 +1529,11 @@ static void i40evf_free_q_vectors(struct i40evf_adapter *adapter)
 	napi_vectors = adapter->num_active_queues;

 	for (q_idx = 0; q_idx < num_q_vectors; q_idx++) {
-		struct i40e_q_vector *q_vector = adapter->q_vector[q_idx];
-
-		adapter->q_vector[q_idx] = NULL;
+		struct i40e_q_vector *q_vector = &adapter->q_vectors[q_idx];
+
 		if (q_idx < napi_vectors)
 			netif_napi_del(&q_vector->napi);
-		kfree(q_vector);
 	}
+	kfree(adapter->q_vectors);
 }
 /**
@@ -1437,6 +1591,22 @@ int i40evf_init_interrupt_scheme(struct i40evf_adapter *adapter)
 	return err;
 }

+/**
+ * i40evf_clear_rss_config_user - Clear user configurations of RSS
+ * @vsi: Pointer to VSI structure
+ **/
+static void i40evf_clear_rss_config_user(struct i40e_vsi *vsi)
+{
+	if (!vsi)
+		return;
+
+	kfree(vsi->rss_hkey_user);
+	vsi->rss_hkey_user = NULL;
+	kfree(vsi->rss_lut_user);
+	vsi->rss_lut_user = NULL;
+}
+
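i40evf_clear_rss_config_user() leans on two conventions: kfree(NULL) is a no-op, so the pointers need no NULL checks, and resetting them to NULL afterwards makes a second clear harmless. The same idiom in stand-alone form, with free() (which is likewise NULL-safe) standing in for kfree():

        #include <stdlib.h>

        struct rss_user_config {
                unsigned char *hkey; /* models vsi->rss_hkey_user */
                unsigned char *lut;  /* models vsi->rss_lut_user */
        };

        static void clear_rss_config_user(struct rss_user_config *cfg)
        {
                if (!cfg)
                        return;

                free(cfg->hkey);  /* free(NULL) is a no-op, like kfree(NULL) */
                cfg->hkey = NULL; /* prevent double-free on a second clear */
                free(cfg->lut);
                cfg->lut = NULL;
        }

        int main(void)
        {
                struct rss_user_config cfg = { malloc(52), malloc(64) };

                clear_rss_config_user(&cfg);
                clear_rss_config_user(&cfg); /* safe: pointers were reset */
                return 0;
        }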
 /**
  * i40evf_watchdog_timer - Periodic call-back timer
  * @data: pointer to adapter disguised as unsigned long
@@ -1564,7 +1734,7 @@ static void i40evf_watchdog_task(struct work_struct *work)
 		 * PF, so we don't have to set current_op as we will
 		 * not get a response through the ARQ.
 		 */
-		i40evf_configure_rss(adapter);
+		i40evf_init_rss(adapter);
 		adapter->aq_required &= ~I40EVF_FLAG_AQ_CONFIGURE_RSS;
 		goto watchdog_done;
 	}
@@ -1864,8 +2034,8 @@ void i40evf_free_all_tx_resources(struct i40evf_adapter *adapter)
 	int i;

 	for (i = 0; i < adapter->num_active_queues; i++)
-		if (adapter->tx_rings[i]->desc)
-			i40evf_free_tx_resources(adapter->tx_rings[i]);
+		if (adapter->tx_rings[i].desc)
+			i40evf_free_tx_resources(&adapter->tx_rings[i]);
 }

 /**
@@ -1883,8 +2053,8 @@ static int i40evf_setup_all_tx_resources(struct i40evf_adapter *adapter)
 	int i, err = 0;

 	for (i = 0; i < adapter->num_active_queues; i++) {
-		adapter->tx_rings[i]->count = adapter->tx_desc_count;
-		err = i40evf_setup_tx_descriptors(adapter->tx_rings[i]);
+		adapter->tx_rings[i].count = adapter->tx_desc_count;
+		err = i40evf_setup_tx_descriptors(&adapter->tx_rings[i]);
 		if (!err)
 			continue;
 		dev_err(&adapter->pdev->dev,
@@ -1910,8 +2080,8 @@ static int i40evf_setup_all_rx_resources(struct i40evf_adapter *adapter)
 	int i, err = 0;

 	for (i = 0; i < adapter->num_active_queues; i++) {
-		adapter->rx_rings[i]->count = adapter->rx_desc_count;
-		err = i40evf_setup_rx_descriptors(adapter->rx_rings[i]);
+		adapter->rx_rings[i].count = adapter->rx_desc_count;
+		err = i40evf_setup_rx_descriptors(&adapter->rx_rings[i]);
 		if (!err)
 			continue;
 		dev_err(&adapter->pdev->dev,
@@ -1932,8 +2102,8 @@ void i40evf_free_all_rx_resources(struct i40evf_adapter *adapter)
 	int i;

 	for (i = 0; i < adapter->num_active_queues; i++)
-		if (adapter->rx_rings[i]->desc)
-			i40evf_free_rx_resources(adapter->rx_rings[i]);
+		if (adapter->rx_rings[i].desc)
+			i40evf_free_rx_resources(&adapter->rx_rings[i]);
 }
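All four tx_rings/rx_rings hunks above are one mechanical change: the rings move from an array of pointers, each individually allocated, to one contiguous array of structs, so element access becomes rings[i].field and callers that need a pointer take &rings[i]. A compact user-space contrast of the two layouts (struct ring here is a stand-in, not the driver's i40e_ring):

        #include <stdio.h>
        #include <stdlib.h>

        struct ring {
                int count;
        };

        int main(void)
        {
                int i, n = 4;

                /* Old layout: array of pointers, one allocation per ring;
                 * access is ring_ptrs[i]->count. */
                struct ring **ring_ptrs = calloc(n, sizeof(*ring_ptrs));
                if (!ring_ptrs)
                        return 1;
                for (i = 0; i < n; i++)
                        ring_ptrs[i] = calloc(1, sizeof(**ring_ptrs));

                /* New layout: one contiguous allocation; access is
                 * rings[i].count, and code needing a pointer takes &rings[i]. */
                struct ring *rings = calloc(n, sizeof(*rings));
                if (!rings)
                        return 1;

                if (ring_ptrs[0])
                        ring_ptrs[0]->count = 512;
                rings[0].count = 512;

                printf("old: %d, new: %d\n",
                       ring_ptrs[0] ? ring_ptrs[0]->count : -1, rings[0].count);

                for (i = 0; i < n; i++)
                        free(ring_ptrs[i]);
                free(ring_ptrs);
                free(rings);
                return 0;
        }

The contiguous form needs one allocation and one free instead of n + 1, and consecutive rings share cache lines, which is the same motivation the cover letter gives for the q_vectors array.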
 /**
@@ -2262,6 +2432,14 @@ static void i40evf_init_task(struct work_struct *work)
 	if (err == I40E_ERR_ADMIN_QUEUE_NO_WORK) {
 		err = i40evf_send_vf_config_msg(adapter);
 		goto err;
+	} else if (err == I40E_ERR_PARAM) {
+		/* We only get ERR_PARAM if the device is in a very bad
+		 * state or if we've been disabled for previous bad
+		 * behavior. Either way, we're done now.
+		 */
+		i40evf_shutdown_adminq(hw);
+		dev_err(&pdev->dev, "Unable to get VF config due to PF error condition, not retrying\n");
+		return;
 	}
 	if (err) {
 		dev_err(&pdev->dev, "Unable to get VF config (%d)\n",
@@ -2312,7 +2490,7 @@ static void i40evf_init_task(struct work_struct *work)
 	    I40E_VIRTCHNL_VF_OFFLOAD_WB_ON_ITR)
 		adapter->flags |= I40EVF_FLAG_WB_ON_ITR_CAPABLE;

 	if (!RSS_AQ(adapter))
-		i40evf_configure_rss(adapter);
+		i40evf_init_rss(adapter);
 	err = i40evf_request_misc_irq(adapter);
 	if (err)
 		goto err_sw_init;
@@ -2342,7 +2520,7 @@ static void i40evf_init_task(struct work_struct *work)
 		adapter->aq_required |= I40EVF_FLAG_AQ_CONFIGURE_RSS;
 		mod_timer_pending(&adapter->watchdog_timer, jiffies + 1);
 	} else {
-		i40evf_configure_rss(adapter);
+		i40evf_init_rss(adapter);
 	}
 	return;
 restart:
@@ -2625,6 +2803,9 @@ static void i40evf_remove(struct pci_dev *pdev)
 	flush_scheduled_work();

+	/* Clear user configurations for RSS */
+	i40evf_clear_rss_config_user(&adapter->vsi);
+
 	if (hw->aq.asq.count)
 		i40evf_shutdown_adminq(hw);
...
@@ -255,19 +255,19 @@ void i40evf_configure_queues(struct i40evf_adapter *adapter)
 	for (i = 0; i < pairs; i++) {
 		vqpi->txq.vsi_id = vqci->vsi_id;
 		vqpi->txq.queue_id = i;
-		vqpi->txq.ring_len = adapter->tx_rings[i]->count;
-		vqpi->txq.dma_ring_addr = adapter->tx_rings[i]->dma;
+		vqpi->txq.ring_len = adapter->tx_rings[i].count;
+		vqpi->txq.dma_ring_addr = adapter->tx_rings[i].dma;
 		vqpi->txq.headwb_enabled = 1;
 		vqpi->txq.dma_headwb_addr = vqpi->txq.dma_ring_addr +
 		    (vqpi->txq.ring_len * sizeof(struct i40e_tx_desc));

 		vqpi->rxq.vsi_id = vqci->vsi_id;
 		vqpi->rxq.queue_id = i;
-		vqpi->rxq.ring_len = adapter->rx_rings[i]->count;
-		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i]->dma;
+		vqpi->rxq.ring_len = adapter->rx_rings[i].count;
+		vqpi->rxq.dma_ring_addr = adapter->rx_rings[i].dma;
 		vqpi->rxq.max_pkt_size = adapter->netdev->mtu
 					+ ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN;
-		vqpi->rxq.databuffer_size = adapter->rx_rings[i]->rx_buf_len;
+		vqpi->rxq.databuffer_size = adapter->rx_rings[i].rx_buf_len;
 		vqpi++;
 	}
@@ -360,7 +360,7 @@ void i40evf_map_queues(struct i40evf_adapter *adapter)
 	vimi->num_vectors = adapter->num_msix_vectors;
 	/* Queue vectors first */
 	for (v_idx = 0; v_idx < q_vectors; v_idx++) {
-		q_vector = adapter->q_vector[v_idx];
+		q_vector = adapter->q_vectors + v_idx;
 		vimi->vecmap[v_idx].vsi_id = adapter->vsi_res->vsi_id;
 		vimi->vecmap[v_idx].vector_id = v_idx + NONQ_VECS;
 		vimi->vecmap[v_idx].txq_map = q_vector->ring_mask;
...