Commit dfa2eb86 authored by David S. Miller

Merge branch '40GbE' of git://git.kernel.org/pub/scm/linux/kernel/git/jkirsher/next-queue

Jeff Kirsher says:

====================
40GbE Intel Wired LAN Driver Updates 2016-02-19

This series contains updates to i40e/i40evf only.

Alex Duyck splits the descriptor count function out of the function that
stops the ring, so the transmit path has access to the descriptor count
used for the data portion of the frame.  He then rewrites the logic for
how we determine whether we can transmit the frame or whether it needs to
be linearized.  He also places the checksum handling close to TSO, since
the two have a lot in common and doing so helps reduce the decision tree
for handling the frame, as the first check in TSO is whether checksumming
is offloaded.
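
For reference, the reworked transmit path now performs the descriptor-count
and linearize checks up front; the following is a condensed sketch of the
flow, taken from the i40e_xmit_frame_ring() changes later in this series
(error handling abbreviated):

  count = i40e_xmit_descriptor_count(skb);

  /* The hardware cannot scatter-gather more than 8 buffers per packet,
   * so if the count indicates we would exceed that, linearize the skb.
   */
  if (i40e_chk_linearize(skb, count)) {
          if (__skb_linearize(skb))
                  goto out_drop;
          count = TXD_USE_COUNT(skb->len);
          tx_ring->tx_stats.tx_linearize++;
  }

  /* need: data descriptors, plus a 4 descriptor gap to avoid the cache
   * line where head is, plus 1 descriptor for the context descriptor
   */
  if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
          tx_ring->tx_stats.tx_busy++;
          return NETDEV_TX_BUSY;
  }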

Carolyn adds functions to blink LEDs on devices that use a 10GBASE-T PHY,
since the MAC registers used in other designs do not work in this device
configuration.  She also fixes an issue where a previously removed message
had returned.
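
The heart of the new blink helper is a loop that toggles the LED's
manual-on bit through the MDIO read/write helpers added in the same
series; condensed from i40e_blink_phy_link_led() (error handling omitted):

  for (i = 0; i < time * 1000; i += interval) {
          /* read the current LED provisioning register */
          i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
                                 led_addr, phy_addr, &led_reg);
          /* toggle the manual-on bit to switch the LED on or off */
          if (led_reg & I40E_PHY_LED_MANUAL_ON)
                  led_reg = 0;
          else
                  led_reg = I40E_PHY_LED_MANUAL_ON;
          i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
                                  led_addr, phy_addr, led_reg);
          msleep(interval);
  }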

Kevin increases the timeout when checking the GLGEN_RSTAT_DEVSTATE bit,
since when linking with particular PHY types the amount of time it takes
for GLGEN_RSTAT_DEVSTATE to be set increases greatly.
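
Concretely, the delay read from GLGEN_RSTCTL is scaled up before the
polling loop; condensed from the i40e_pf_reset() change in this series
(the per-poll delay is unchanged and omitted here):

  grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
              I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
              I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;

  /* It can take up to 15 seconds to reach GRST steady state,
   * so bump the loop bound to roughly 16 seconds to be safe.
   */
  grst_del = grst_del * 20;

  for (cnt = 0; cnt < grst_del; cnt++) {
          reg = rd32(hw, I40E_GLGEN_RSTAT);
          if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
                  break;
  }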

Neerav changes the receive queues so that, like the transmit queues, they
do not wait to be disabled before DCB has been reconfigured.

Anjali adds new register definitions for programming the parser, flow
director and RSS blocks in the hardware.

Shannon adds the new opcodes and structures used for asking the firmware
to update receive control registers that need extra care when accessed
under heavy traffic.  He then integrates the new AdminQ functions for
safely accessing the receive control registers that may be affected by
heavy small-packet traffic.
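
The resulting access pattern is: if the firmware API is new enough, go
through the AdminQ and retry on EAGAIN, falling back to a direct register
access if that fails; condensed from the new i40e_read_rx_ctl() helper:

  /* older firmware (API < 1.5) does not know the Rx ctl AQ commands */
  use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
  if (!use_register) {
do_retry:
          status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
          if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
                  usleep_range(1000, 2000);
                  retry--;
                  goto do_retry;
          }
  }

  /* if the AQ access failed, try the old-fashioned way */
  if (status || use_register)
          val = rd32(hw, reg_addr);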

Mitch provides another colorful patch description on letting go of
the stale local VSI pointer when the VF resets.
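
The change itself is small: after i40e_vc_disable_vf() triggers the reset,
the local VSI pointer is refreshed rather than reused, as in this excerpt
from the port-VLAN path:

  i40e_vc_disable_vf(pf, vf);
  /* During reset the VF got a new VSI, so refresh the pointer. */
  vsi = pf->vsi[vf->lan_vsi_idx];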
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d289cbed b8f1343a
...@@ -111,6 +111,7 @@ ...@@ -111,6 +111,7 @@
#define I40E_OEM_VER_PATCH_MASK 0xff #define I40E_OEM_VER_PATCH_MASK 0xff
#define I40E_OEM_VER_BUILD_SHIFT 8 #define I40E_OEM_VER_BUILD_SHIFT 8
#define I40E_OEM_VER_SHIFT 24 #define I40E_OEM_VER_SHIFT 24
#define I40E_PHY_DEBUG_PORT BIT(4)
/* The values in here are decimal coded as hex as is the case in the NVM map*/ /* The values in here are decimal coded as hex as is the case in the NVM map*/
#define I40E_CURRENT_NVM_VERSION_HI 0x2 #define I40E_CURRENT_NVM_VERSION_HI 0x2
...@@ -355,6 +356,7 @@ struct i40e_pf { ...@@ -355,6 +356,7 @@ struct i40e_pf {
#define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45) #define I40E_FLAG_NO_DCB_SUPPORT BIT_ULL(45)
#define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46) #define I40E_FLAG_USE_SET_LLDP_MIB BIT_ULL(46)
#define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47) #define I40E_FLAG_STOP_FW_LLDP BIT_ULL(47)
#define I40E_FLAG_HAVE_10GBASET_PHY BIT_ULL(48)
#define I40E_FLAG_PF_MAC BIT_ULL(50) #define I40E_FLAG_PF_MAC BIT_ULL(50)
/* tracks features that get auto disabled by errors */ /* tracks features that get auto disabled by errors */
...@@ -440,6 +442,7 @@ struct i40e_pf { ...@@ -440,6 +442,7 @@ struct i40e_pf {
u32 ioremap_len; u32 ioremap_len;
u32 fd_inv; u32 fd_inv;
u16 phy_led_val;
}; };
struct i40e_mac_filter { struct i40e_mac_filter {
......
...@@ -146,6 +146,8 @@ enum i40e_admin_queue_opc { ...@@ -146,6 +146,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_set_port_parameters = 0x0203,
i40e_aqc_opc_get_switch_resource_alloc = 0x0204, i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
i40e_aqc_opc_set_switch_config = 0x0205, i40e_aqc_opc_set_switch_config = 0x0205,
i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_add_vsi = 0x0210,
i40e_aqc_opc_update_vsi_parameters = 0x0211, i40e_aqc_opc_update_vsi_parameters = 0x0211,
...@@ -696,6 +698,20 @@ struct i40e_aqc_set_switch_config { ...@@ -696,6 +698,20 @@ struct i40e_aqc_set_switch_config {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
/* Read Receive control registers (direct 0x0206)
* Write Receive control registers (direct 0x0207)
* used for accessing Rx control registers that can be
* slow and need special handling when under high Rx load
*/
struct i40e_aqc_rx_ctl_reg_read_write {
__le32 reserved1;
__le32 address;
__le32 reserved2;
__le32 value;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
/* Add VSI (indirect 0x0210) /* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data * this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes) * as the indirect buffer (128 bytes)
......
...@@ -1239,7 +1239,13 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw) ...@@ -1239,7 +1239,13 @@ i40e_status i40e_pf_reset(struct i40e_hw *hw)
grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) & grst_del = (rd32(hw, I40E_GLGEN_RSTCTL) &
I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >> I40E_GLGEN_RSTCTL_GRSTDEL_MASK) >>
I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT; I40E_GLGEN_RSTCTL_GRSTDEL_SHIFT;
for (cnt = 0; cnt < grst_del + 10; cnt++) {
/* It can take upto 15 secs for GRST steady state.
* Bump it to 16 secs max to be safe.
*/
grst_del = grst_del * 20;
for (cnt = 0; cnt < grst_del; cnt++) {
reg = rd32(hw, I40E_GLGEN_RSTAT); reg = rd32(hw, I40E_GLGEN_RSTAT);
if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK)) if (!(reg & I40E_GLGEN_RSTAT_DEVSTATE_MASK))
break; break;
...@@ -1887,6 +1893,32 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw, ...@@ -1887,6 +1893,32 @@ i40e_status i40e_aq_set_phy_int_mask(struct i40e_hw *hw,
return status; return status;
} }
/**
* i40e_aq_set_phy_debug
* @hw: pointer to the hw struct
* @cmd_flags: debug command flags
* @cmd_details: pointer to command details structure or NULL
*
* Reset the external PHY.
**/
enum i40e_status_code i40e_aq_set_phy_debug(struct i40e_hw *hw, u8 cmd_flags,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_set_phy_debug *cmd =
(struct i40e_aqc_set_phy_debug *)&desc.params.raw;
enum i40e_status_code status;
i40e_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_set_phy_debug);
cmd->command_flags = cmd_flags;
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/** /**
* i40e_aq_add_vsi * i40e_aq_add_vsi
* @hw: pointer to the hw struct * @hw: pointer to the hw struct
...@@ -3850,7 +3882,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw, ...@@ -3850,7 +3882,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
return ret; return ret;
/* Read the PF Queue Filter control register */ /* Read the PF Queue Filter control register */
val = rd32(hw, I40E_PFQF_CTL_0); val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
/* Program required PE hash buckets for the PF */ /* Program required PE hash buckets for the PF */
val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK; val &= ~I40E_PFQF_CTL_0_PEHSIZE_MASK;
...@@ -3887,7 +3919,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw, ...@@ -3887,7 +3919,7 @@ i40e_status i40e_set_filter_control(struct i40e_hw *hw,
if (settings->enable_macvlan) if (settings->enable_macvlan)
val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK; val |= I40E_PFQF_CTL_0_MACVLAN_ENA_MASK;
wr32(hw, I40E_PFQF_CTL_0, val); i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, val);
return 0; return 0;
} }
...@@ -4214,3 +4246,454 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw, ...@@ -4214,3 +4246,454 @@ i40e_status i40e_aq_configure_partition_bw(struct i40e_hw *hw,
return status; return status;
} }
/**
* i40e_read_phy_register
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @value: PHY register value
*
* Reads specified PHY register value
**/
i40e_status i40e_read_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr,
u16 *value)
{
i40e_status status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
u8 port_num = hw->func_caps.mdio_port_num;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_OPCODE_ADDRESS) |
(I40E_MDIO_STCODE) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do {
command = rd32(hw, I40E_GLGEN_MSCA(port_num));
if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
status = 0;
break;
}
usleep_range(10, 20);
retry--;
} while (retry);
if (status) {
i40e_debug(hw, I40E_DEBUG_PHY,
"PHY: Can't write command to external PHY.\n");
goto phy_read_end;
}
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_OPCODE_READ) |
(I40E_MDIO_STCODE) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
retry = 1000;
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do {
command = rd32(hw, I40E_GLGEN_MSCA(port_num));
if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
status = 0;
break;
}
usleep_range(10, 20);
retry--;
} while (retry);
if (!status) {
command = rd32(hw, I40E_GLGEN_MSRWD(port_num));
*value = (command & I40E_GLGEN_MSRWD_MDIRDDATA_MASK) >>
I40E_GLGEN_MSRWD_MDIRDDATA_SHIFT;
} else {
i40e_debug(hw, I40E_DEBUG_PHY,
"PHY: Can't read register value from external PHY.\n");
}
phy_read_end:
return status;
}
/**
* i40e_write_phy_register
* @hw: pointer to the HW structure
* @page: registers page number
* @reg: register address in the page
* @phy_adr: PHY address on MDIO interface
* @value: PHY register value
*
* Writes value to specified PHY register
**/
i40e_status i40e_write_phy_register(struct i40e_hw *hw,
u8 page, u16 reg, u8 phy_addr,
u16 value)
{
i40e_status status = I40E_ERR_TIMEOUT;
u32 command = 0;
u16 retry = 1000;
u8 port_num = hw->func_caps.mdio_port_num;
command = (reg << I40E_GLGEN_MSCA_MDIADD_SHIFT) |
(page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_OPCODE_ADDRESS) |
(I40E_MDIO_STCODE) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do {
command = rd32(hw, I40E_GLGEN_MSCA(port_num));
if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
status = 0;
break;
}
usleep_range(10, 20);
retry--;
} while (retry);
if (status) {
i40e_debug(hw, I40E_DEBUG_PHY,
"PHY: Can't write command to external PHY.\n");
goto phy_write_end;
}
command = value << I40E_GLGEN_MSRWD_MDIWRDATA_SHIFT;
wr32(hw, I40E_GLGEN_MSRWD(port_num), command);
command = (page << I40E_GLGEN_MSCA_DEVADD_SHIFT) |
(phy_addr << I40E_GLGEN_MSCA_PHYADD_SHIFT) |
(I40E_MDIO_OPCODE_WRITE) |
(I40E_MDIO_STCODE) |
(I40E_GLGEN_MSCA_MDICMD_MASK) |
(I40E_GLGEN_MSCA_MDIINPROGEN_MASK);
status = I40E_ERR_TIMEOUT;
retry = 1000;
wr32(hw, I40E_GLGEN_MSCA(port_num), command);
do {
command = rd32(hw, I40E_GLGEN_MSCA(port_num));
if (!(command & I40E_GLGEN_MSCA_MDICMD_MASK)) {
status = 0;
break;
}
usleep_range(10, 20);
retry--;
} while (retry);
phy_write_end:
return status;
}
/**
* i40e_get_phy_address
* @hw: pointer to the HW structure
* @dev_num: PHY port num that address we want
* @phy_addr: Returned PHY address
*
* Gets PHY address for current port
**/
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num)
{
u8 port_num = hw->func_caps.mdio_port_num;
u32 reg_val = rd32(hw, I40E_GLGEN_MDIO_I2C_SEL(port_num));
return (u8)(reg_val >> ((dev_num + 1) * 5)) & 0x1f;
}
/**
* i40e_blink_phy_led
* @hw: pointer to the HW structure
* @time: time how long led will blinks in secs
* @interval: gap between LED on and off in msecs
*
* Blinks PHY link LED
**/
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval)
{
i40e_status status = 0;
u32 i;
u16 led_ctl;
u16 gpio_led_port;
u16 led_reg;
u16 led_addr = I40E_PHY_LED_PROV_REG_1;
u8 phy_addr = 0;
u8 port_num;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
led_addr++) {
status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
if (status)
goto phy_blinking_end;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
status = i40e_write_phy_register(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
if (status)
goto phy_blinking_end;
break;
}
}
if (time > 0 && interval > 0) {
for (i = 0; i < time * 1000; i += interval) {
status = i40e_read_phy_register(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
&led_reg);
if (status)
goto restore_config;
if (led_reg & I40E_PHY_LED_MANUAL_ON)
led_reg = 0;
else
led_reg = I40E_PHY_LED_MANUAL_ON;
status = i40e_write_phy_register(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr,
led_reg);
if (status)
goto restore_config;
msleep(interval);
}
}
restore_config:
status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
phy_addr, led_ctl);
phy_blinking_end:
return status;
}
/**
* i40e_led_get_phy - return current on/off mode
* @hw: pointer to the hw struct
* @led_addr: address of led register to use
* @val: original value of register to use
*
**/
i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val)
{
i40e_status status = 0;
u16 gpio_led_port;
u8 phy_addr = 0;
u16 reg_val;
u16 temp_addr;
u8 port_num;
u32 i;
temp_addr = I40E_PHY_LED_PROV_REG_1;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
for (gpio_led_port = 0; gpio_led_port < 3; gpio_led_port++,
temp_addr++) {
status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
temp_addr, phy_addr, &reg_val);
if (status)
return status;
*val = reg_val;
if (reg_val & I40E_PHY_LED_LINK_MODE_MASK) {
*led_addr = temp_addr;
break;
}
}
return status;
}
/**
* i40e_led_set_phy
* @hw: pointer to the HW structure
* @on: true or false
* @mode: original val plus bit for set or ignore
* Set led's on or off when controlled by the PHY
*
**/
i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode)
{
i40e_status status = 0;
u16 led_ctl = 0;
u16 led_reg = 0;
u8 phy_addr = 0;
u8 port_num;
u32 i;
i = rd32(hw, I40E_PFGEN_PORTNUM);
port_num = (u8)(i & I40E_PFGEN_PORTNUM_PORT_NUM_MASK);
phy_addr = i40e_get_phy_address(hw, port_num);
status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
phy_addr, &led_reg);
if (status)
return status;
led_ctl = led_reg;
if (led_reg & I40E_PHY_LED_LINK_MODE_MASK) {
led_reg = 0;
status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_reg);
if (status)
return status;
}
status = i40e_read_phy_register(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, &led_reg);
if (status)
goto restore_config;
if (on)
led_reg = I40E_PHY_LED_MANUAL_ON;
else
led_reg = 0;
status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_reg);
if (status)
goto restore_config;
if (mode & I40E_PHY_LED_MODE_ORIG) {
led_ctl = (mode & I40E_PHY_LED_MODE_MASK);
status = i40e_write_phy_register(hw,
I40E_PHY_COM_REG_PAGE,
led_addr, phy_addr, led_ctl);
}
return status;
restore_config:
status = i40e_write_phy_register(hw, I40E_PHY_COM_REG_PAGE, led_addr,
phy_addr, led_ctl);
return status;
}
/**
* i40e_aq_rx_ctl_read_register - use FW to read from an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: ptr to register value
* @cmd_details: pointer to command details structure or NULL
*
* Use the firmware to read the Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
i40e_status status;
if (!reg_val)
return I40E_ERR_PARAM;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_read);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (status == 0)
*reg_val = le32_to_cpu(cmd_resp->value);
return status;
}
/**
* i40e_read_rx_ctl - read from an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
**/
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
i40e_status status = 0;
bool use_register;
int retry = 5;
u32 val = 0;
use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_read_register(hw, reg_addr, &val, NULL);
if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
}
}
/* if the AQ access failed, try the old-fashioned way */
if (status || use_register)
val = rd32(hw, reg_addr);
return val;
}
/**
* i40e_aq_rx_ctl_write_register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: register value
* @cmd_details: pointer to command details structure or NULL
*
* Use the firmware to write to an Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
i40e_status status;
i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_rx_ctl_reg_write);
cmd->address = cpu_to_le32(reg_addr);
cmd->value = cpu_to_le32(reg_val);
status = i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40e_write_rx_ctl - write to an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: register value
**/
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
i40e_status status = 0;
bool use_register;
int retry = 5;
use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
if (!use_register) {
do_retry:
status = i40e_aq_rx_ctl_write_register(hw, reg_addr,
reg_val, NULL);
if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
}
}
/* if the AQ access failed, try the old-fashioned way */
if (status || use_register)
wr32(hw, reg_addr, reg_val);
}
...@@ -1826,27 +1826,51 @@ static int i40e_set_phys_id(struct net_device *netdev, ...@@ -1826,27 +1826,51 @@ static int i40e_set_phys_id(struct net_device *netdev,
enum ethtool_phys_id_state state) enum ethtool_phys_id_state state)
{ {
struct i40e_netdev_priv *np = netdev_priv(netdev); struct i40e_netdev_priv *np = netdev_priv(netdev);
i40e_status ret = 0;
struct i40e_pf *pf = np->vsi->back; struct i40e_pf *pf = np->vsi->back;
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
int blink_freq = 2; int blink_freq = 2;
u16 temp_status;
switch (state) { switch (state) {
case ETHTOOL_ID_ACTIVE: case ETHTOOL_ID_ACTIVE:
if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
pf->led_status = i40e_led_get(hw); pf->led_status = i40e_led_get(hw);
} else {
i40e_aq_set_phy_debug(hw, I40E_PHY_DEBUG_PORT, NULL);
ret = i40e_led_get_phy(hw, &temp_status,
&pf->phy_led_val);
pf->led_status = temp_status;
}
return blink_freq; return blink_freq;
case ETHTOOL_ID_ON: case ETHTOOL_ID_ON:
i40e_led_set(hw, 0xF, false); if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
i40e_led_set(hw, 0xf, false);
else
ret = i40e_led_set_phy(hw, true, pf->led_status, 0);
break; break;
case ETHTOOL_ID_OFF: case ETHTOOL_ID_OFF:
if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY))
i40e_led_set(hw, 0x0, false); i40e_led_set(hw, 0x0, false);
else
ret = i40e_led_set_phy(hw, false, pf->led_status, 0);
break; break;
case ETHTOOL_ID_INACTIVE: case ETHTOOL_ID_INACTIVE:
i40e_led_set(hw, pf->led_status, false); if (!(pf->flags & I40E_FLAG_HAVE_10GBASET_PHY)) {
i40e_led_set(hw, false, pf->led_status);
} else {
ret = i40e_led_set_phy(hw, false, pf->led_status,
(pf->phy_led_val |
I40E_PHY_LED_MODE_ORIG));
i40e_aq_set_phy_debug(hw, 0, NULL);
}
break; break;
default: default:
break; break;
} }
if (ret)
return -ENOENT;
else
return 0; return 0;
} }
...@@ -2157,8 +2181,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd, ...@@ -2157,8 +2181,8 @@ static int i40e_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
{ {
struct i40e_hw *hw = &pf->hw; struct i40e_hw *hw = &pf->hw;
u64 hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | u64 hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
/* RSS does not support anything other than hashing /* RSS does not support anything other than hashing
* to queues on src and dst IPs and ports * to queues on src and dst IPs and ports
...@@ -2267,8 +2291,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc) ...@@ -2267,8 +2291,8 @@ static int i40e_set_rss_hash_opt(struct i40e_pf *pf, struct ethtool_rxnfc *nfc)
return -EINVAL; return -EINVAL;
} }
wr32(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
i40e_flush(hw); i40e_flush(hw);
/* Save setting for future output/update */ /* Save setting for future output/update */
......
...@@ -295,11 +295,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf) ...@@ -295,11 +295,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf)
} }
/* enable FCoE hash filter */ /* enable FCoE hash filter */
val = rd32(hw, I40E_PFQF_HENA(1)); val = i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1));
val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32); val |= BIT(I40E_FILTER_PCTYPE_FCOE_OX - 32);
val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32); val |= BIT(I40E_FILTER_PCTYPE_FCOE_RX - 32);
val &= I40E_PFQF_HENA_PTYPE_ENA_MASK; val &= I40E_PFQF_HENA_PTYPE_ENA_MASK;
wr32(hw, I40E_PFQF_HENA(1), val); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), val);
/* enable flag */ /* enable flag */
pf->flags |= I40E_FLAG_FCOE_ENABLED; pf->flags |= I40E_FLAG_FCOE_ENABLED;
...@@ -317,11 +317,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf) ...@@ -317,11 +317,11 @@ void i40e_init_pf_fcoe(struct i40e_pf *pf)
pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K; pf->filter_settings.fcoe_cntx_num = I40E_DMA_CNTX_SIZE_4K;
/* Setup max frame with FCoE_MTU plus L2 overheads */ /* Setup max frame with FCoE_MTU plus L2 overheads */
val = rd32(hw, I40E_GLFCOE_RCTL); val = i40e_read_rx_ctl(hw, I40E_GLFCOE_RCTL);
val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK; val &= ~I40E_GLFCOE_RCTL_MAX_SIZE_MASK;
val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN) val |= ((FCOE_MTU + ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN)
<< I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT); << I40E_GLFCOE_RCTL_MAX_SIZE_SHIFT);
wr32(hw, I40E_GLFCOE_RCTL, val); i40e_write_rx_ctl(hw, I40E_GLFCOE_RCTL, val);
dev_info(&pf->pdev->dev, "FCoE is supported.\n"); dev_info(&pf->pdev->dev, "FCoE is supported.\n");
} }
...@@ -1359,16 +1359,32 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb, ...@@ -1359,16 +1359,32 @@ static netdev_tx_t i40e_fcoe_xmit_frame(struct sk_buff *skb,
struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping]; struct i40e_ring *tx_ring = vsi->tx_rings[skb->queue_mapping];
struct i40e_tx_buffer *first; struct i40e_tx_buffer *first;
u32 tx_flags = 0; u32 tx_flags = 0;
int fso, count;
u8 hdr_len = 0; u8 hdr_len = 0;
u8 sof = 0; u8 sof = 0;
u8 eof = 0; u8 eof = 0;
int fso;
if (i40e_fcoe_set_skb_header(skb)) if (i40e_fcoe_set_skb_header(skb))
goto out_drop; goto out_drop;
if (!i40e_xmit_descriptor_count(skb, tx_ring)) count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
count = TXD_USE_COUNT(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
* + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
}
/* prepare the xmit flags */ /* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
......
...@@ -46,7 +46,7 @@ static const char i40e_driver_string[] = ...@@ -46,7 +46,7 @@ static const char i40e_driver_string[] =
#define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 4 #define DRV_VERSION_MINOR 4
#define DRV_VERSION_BUILD 15 #define DRV_VERSION_BUILD 25
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) DRV_KERN __stringify(DRV_VERSION_BUILD) DRV_KERN
...@@ -3929,6 +3929,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable) ...@@ -3929,6 +3929,9 @@ static int i40e_vsi_control_rx(struct i40e_vsi *vsi, bool enable)
else else
rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK; rx_reg &= ~I40E_QRX_ENA_QENA_REQ_MASK;
wr32(hw, I40E_QRX_ENA(pf_q), rx_reg); wr32(hw, I40E_QRX_ENA(pf_q), rx_reg);
/* No waiting for the Tx queue to disable */
if (!enable && test_bit(__I40E_PORT_TX_SUSPENDED, &pf->state))
continue;
/* wait for the change to finish */ /* wait for the change to finish */
ret = i40e_pf_rxq_wait(pf, pf_q, enable); ret = i40e_pf_rxq_wait(pf, pf_q, enable);
...@@ -4287,12 +4290,12 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf) ...@@ -4287,12 +4290,12 @@ static void i40e_pf_unquiesce_all_vsi(struct i40e_pf *pf)
#ifdef CONFIG_I40E_DCB #ifdef CONFIG_I40E_DCB
/** /**
* i40e_vsi_wait_txq_disabled - Wait for VSI's queues to be disabled * i40e_vsi_wait_queues_disabled - Wait for VSI's queues to be disabled
* @vsi: the VSI being configured * @vsi: the VSI being configured
* *
* This function waits for the given VSI's Tx queues to be disabled. * This function waits for the given VSI's queues to be disabled.
**/ **/
static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) static int i40e_vsi_wait_queues_disabled(struct i40e_vsi *vsi)
{ {
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf = vsi->back;
int i, pf_q, ret; int i, pf_q, ret;
...@@ -4309,24 +4312,36 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi) ...@@ -4309,24 +4312,36 @@ static int i40e_vsi_wait_txq_disabled(struct i40e_vsi *vsi)
} }
} }
pf_q = vsi->base_queue;
for (i = 0; i < vsi->num_queue_pairs; i++, pf_q++) {
/* Check and wait for the disable status of the queue */
ret = i40e_pf_rxq_wait(pf, pf_q, false);
if (ret) {
dev_info(&pf->pdev->dev,
"VSI seid %d Rx ring %d disable timeout\n",
vsi->seid, pf_q);
return ret;
}
}
return 0; return 0;
} }
/** /**
* i40e_pf_wait_txq_disabled - Wait for all queues of PF VSIs to be disabled * i40e_pf_wait_queues_disabled - Wait for all queues of PF VSIs to be disabled
* @pf: the PF * @pf: the PF
* *
* This function waits for the Tx queues to be in disabled state for all the * This function waits for the queues to be in disabled state for all the
* VSIs that are managed by this PF. * VSIs that are managed by this PF.
**/ **/
static int i40e_pf_wait_txq_disabled(struct i40e_pf *pf) static int i40e_pf_wait_queues_disabled(struct i40e_pf *pf)
{ {
int v, ret = 0; int v, ret = 0;
for (v = 0; v < pf->hw.func_caps.num_vsis; v++) { for (v = 0; v < pf->hw.func_caps.num_vsis; v++) {
/* No need to wait for FCoE VSI queues */ /* No need to wait for FCoE VSI queues */
if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) { if (pf->vsi[v] && pf->vsi[v]->type != I40E_VSI_FCOE) {
ret = i40e_vsi_wait_txq_disabled(pf->vsi[v]); ret = i40e_vsi_wait_queues_disabled(pf->vsi[v]);
if (ret) if (ret)
break; break;
} }
...@@ -5726,8 +5741,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf, ...@@ -5726,8 +5741,8 @@ static int i40e_handle_lldp_event(struct i40e_pf *pf,
if (ret) if (ret)
goto exit; goto exit;
/* Wait for the PF's Tx queues to be disabled */ /* Wait for the PF's queues to be disabled */
ret = i40e_pf_wait_txq_disabled(pf); ret = i40e_pf_wait_queues_disabled(pf);
if (ret) { if (ret) {
/* Schedule PF reset to recover */ /* Schedule PF reset to recover */
set_bit(__I40E_PF_RESET_REQUESTED, &pf->state); set_bit(__I40E_PF_RESET_REQUESTED, &pf->state);
...@@ -7094,8 +7109,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf) ...@@ -7094,8 +7109,9 @@ static void i40e_sync_udp_filters_subtask(struct i40e_pf *pf)
ret = i40e_aq_del_udp_tunnel(hw, i, NULL); ret = i40e_aq_del_udp_tunnel(hw, i, NULL);
if (ret) { if (ret) {
dev_info(&pf->pdev->dev, dev_dbg(&pf->pdev->dev,
"%s vxlan port %d, index %d failed, err %s aq_err %s\n", "%s %s port %d, index %d failed, err %s aq_err %s\n",
pf->udp_ports[i].type ? "vxlan" : "geneve",
port ? "add" : "delete", port ? "add" : "delete",
ntohs(port), i, ntohs(port), i,
i40e_stat_str(&pf->hw, ret), i40e_stat_str(&pf->hw, ret),
...@@ -8016,7 +8032,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed, ...@@ -8016,7 +8032,7 @@ static int i40e_config_rss_reg(struct i40e_vsi *vsi, const u8 *seed,
u32 *seed_dw = (u32 *)seed; u32 *seed_dw = (u32 *)seed;
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
wr32(hw, I40E_PFQF_HKEY(i), seed_dw[i]); i40e_write_rx_ctl(hw, I40E_PFQF_HKEY(i), seed_dw[i]);
} }
if (lut) { if (lut) {
...@@ -8053,7 +8069,7 @@ static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed, ...@@ -8053,7 +8069,7 @@ static int i40e_get_rss_reg(struct i40e_vsi *vsi, u8 *seed,
u32 *seed_dw = (u32 *)seed; u32 *seed_dw = (u32 *)seed;
for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++) for (i = 0; i <= I40E_PFQF_HKEY_MAX_INDEX; i++)
seed_dw[i] = rd32(hw, I40E_PFQF_HKEY(i)); seed_dw[i] = i40e_read_rx_ctl(hw, I40E_PFQF_HKEY(i));
} }
if (lut) { if (lut) {
u32 *lut_dw = (u32 *)lut; u32 *lut_dw = (u32 *)lut;
...@@ -8136,19 +8152,19 @@ static int i40e_pf_config_rss(struct i40e_pf *pf) ...@@ -8136,19 +8152,19 @@ static int i40e_pf_config_rss(struct i40e_pf *pf)
int ret; int ret;
/* By default we enable TCP/UDP with IPv4/IPv6 ptypes */ /* By default we enable TCP/UDP with IPv4/IPv6 ptypes */
hena = (u64)rd32(hw, I40E_PFQF_HENA(0)) | hena = (u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(0)) |
((u64)rd32(hw, I40E_PFQF_HENA(1)) << 32); ((u64)i40e_read_rx_ctl(hw, I40E_PFQF_HENA(1)) << 32);
hena |= i40e_pf_get_default_rss_hena(pf); hena |= i40e_pf_get_default_rss_hena(pf);
wr32(hw, I40E_PFQF_HENA(0), (u32)hena); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), (u32)hena);
wr32(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32)); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), (u32)(hena >> 32));
/* Determine the RSS table size based on the hardware capabilities */ /* Determine the RSS table size based on the hardware capabilities */
reg_val = rd32(hw, I40E_PFQF_CTL_0); reg_val = i40e_read_rx_ctl(hw, I40E_PFQF_CTL_0);
reg_val = (pf->rss_table_size == 512) ? reg_val = (pf->rss_table_size == 512) ?
(reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) : (reg_val | I40E_PFQF_CTL_0_HASHLUTSIZE_512) :
(reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512); (reg_val & ~I40E_PFQF_CTL_0_HASHLUTSIZE_512);
wr32(hw, I40E_PFQF_CTL_0, reg_val); i40e_write_rx_ctl(hw, I40E_PFQF_CTL_0, reg_val);
/* Determine the RSS size of the VSI */ /* Determine the RSS size of the VSI */
if (!vsi->rss_size) if (!vsi->rss_size)
...@@ -9567,10 +9583,15 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi) ...@@ -9567,10 +9583,15 @@ static int i40e_vsi_setup_vectors(struct i40e_vsi *vsi)
**/ **/
static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi) static struct i40e_vsi *i40e_vsi_reinit_setup(struct i40e_vsi *vsi)
{ {
struct i40e_pf *pf = vsi->back; struct i40e_pf *pf;
u8 enabled_tc; u8 enabled_tc;
int ret; int ret;
if (!vsi)
return NULL;
pf = vsi->back;
i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx); i40e_put_lump(pf->qp_pile, vsi->base_queue, vsi->idx);
i40e_vsi_clear_rings(vsi); i40e_vsi_clear_rings(vsi);
...@@ -11130,6 +11151,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ...@@ -11130,6 +11151,10 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw, i40e_add_filter_to_drop_tx_flow_control_frames(&pf->hw,
pf->main_vsi_seid); pf->main_vsi_seid);
if ((pf->hw.device_id == I40E_DEV_ID_10G_BASE_T) ||
(pf->hw.device_id == I40E_DEV_ID_10G_BASE_T4))
pf->flags |= I40E_FLAG_HAVE_10GBASET_PHY;
/* print a string summarizing features */ /* print a string summarizing features */
i40e_print_features(pf); i40e_print_features(pf);
...@@ -11186,10 +11211,11 @@ static void i40e_remove(struct pci_dev *pdev) ...@@ -11186,10 +11211,11 @@ static void i40e_remove(struct pci_dev *pdev)
i40e_ptp_stop(pf); i40e_ptp_stop(pf);
/* Disable RSS in hw */ /* Disable RSS in hw */
wr32(hw, I40E_PFQF_HENA(0), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(0), 0);
wr32(hw, I40E_PFQF_HENA(1), 0); i40e_write_rx_ctl(hw, I40E_PFQF_HENA(1), 0);
/* no more scheduling of any task */ /* no more scheduling of any task */
set_bit(__I40E_SUSPENDED, &pf->state);
set_bit(__I40E_DOWN, &pf->state); set_bit(__I40E_DOWN, &pf->state);
del_timer_sync(&pf->service_timer); del_timer_sync(&pf->service_timer);
cancel_work_sync(&pf->service_task); cancel_work_sync(&pf->service_task);
......
...@@ -74,6 +74,12 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw, ...@@ -74,6 +74,12 @@ i40e_status i40e_aq_set_rss_key(struct i40e_hw *hw,
u32 i40e_led_get(struct i40e_hw *hw); u32 i40e_led_get(struct i40e_hw *hw);
void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink); void i40e_led_set(struct i40e_hw *hw, u32 mode, bool blink);
i40e_status i40e_led_set_phy(struct i40e_hw *hw, bool on,
u16 led_addr, u32 mode);
i40e_status i40e_led_get_phy(struct i40e_hw *hw, u16 *led_addr,
u16 *val);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
/* admin send queue commands */ /* admin send queue commands */
...@@ -336,4 +342,19 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id, ...@@ -336,4 +342,19 @@ i40e_status i40e_aq_debug_dump(struct i40e_hw *hw, u8 cluster_id,
struct i40e_asq_cmd_details *cmd_details); struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid); u16 vsi_seid);
i40e_status i40e_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
u32 i40e_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
i40e_status i40e_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40e_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */ #endif /* _I40E_PROTOTYPE_H_ */
...@@ -2045,6 +2045,14 @@ ...@@ -2045,6 +2045,14 @@
#define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */ #define I40E_PRTPM_TLPIC 0x001E43C0 /* Reset: GLOBR */
#define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0 #define I40E_PRTPM_TLPIC_ETLPIC_SHIFT 0
#define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT) #define I40E_PRTPM_TLPIC_ETLPIC_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTPM_TLPIC_ETLPIC_SHIFT)
#define I40E_GL_PRS_FVBM(_i) (0x00269760 + ((_i) * 4)) /* _i=0...3 */ /* Reset: CORER */
#define I40E_GL_PRS_FVBM_MAX_INDEX 3
#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT 0
#define I40E_GL_PRS_FVBM_FV_BYTE_INDX_MASK I40E_MASK(0x7F, I40E_GL_PRS_FVBM_FV_BYTE_INDX_SHIFT)
#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT 8
#define I40E_GL_PRS_FVBM_RULE_BUS_INDX_MASK I40E_MASK(0x3F, I40E_GL_PRS_FVBM_RULE_BUS_INDX_SHIFT)
#define I40E_GL_PRS_FVBM_MSK_ENA_SHIFT 31
#define I40E_GL_PRS_FVBM_MSK_ENA_MASK I40E_MASK(0x1, I40E_GL_PRS_FVBM_MSK_ENA_SHIFT)
#define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */ #define I40E_GLRPB_DPSS 0x000AC828 /* Reset: CORER */
#define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0 #define I40E_GLRPB_DPSS_DPS_TCN_SHIFT 0
#define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT) #define I40E_GLRPB_DPSS_DPS_TCN_MASK I40E_MASK(0xFFFFF, I40E_GLRPB_DPSS_DPS_TCN_SHIFT)
...@@ -2216,6 +2224,14 @@ ...@@ -2216,6 +2224,14 @@
#define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63 #define I40E_PRTQF_FD_FLXINSET_MAX_INDEX 63
#define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0 #define I40E_PRTQF_FD_FLXINSET_INSET_SHIFT 0
#define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT) #define I40E_PRTQF_FD_FLXINSET_INSET_MASK I40E_MASK(0xFF, I40E_PRTQF_FD_FLXINSET_INSET_SHIFT)
#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
#define I40E_PRTQF_FD_INSET(_i, _j) (0x00250000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
#define I40E_PRTQF_FD_INSET_MAX_INDEX 63
#define I40E_PRTQF_FD_INSET_INSET_SHIFT 0
#define I40E_PRTQF_FD_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_PRTQF_FD_INSET_INSET_SHIFT)
#define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */ #define I40E_PRTQF_FD_MSK(_i, _j) (0x00252000 + ((_i) * 64 + (_j) * 32)) /* _i=0...63, _j=0...1 */ /* Reset: CORER */
#define I40E_PRTQF_FD_MSK_MAX_INDEX 63 #define I40E_PRTQF_FD_MSK_MAX_INDEX 63
#define I40E_PRTQF_FD_MSK_MASK_SHIFT 0 #define I40E_PRTQF_FD_MSK_MASK_SHIFT 0
...@@ -5155,6 +5171,38 @@ ...@@ -5155,6 +5171,38 @@
#define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63 #define I40E_GLQF_FD_PCTYPES_MAX_INDEX 63
#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0 #define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT 0
#define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT) #define I40E_GLQF_FD_PCTYPES_FD_PCTYPE_MASK I40E_MASK(0x3F, I40E_GLQF_FD_PCTYPES_FD_PCTYPE_SHIFT)
#define I40E_GLQF_FD_MSK(_i, _j) (0x00267200 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
#define I40E_GLQF_FD_MSK_MAX_INDEX 1
#define I40E_GLQF_FD_MSK_MASK_SHIFT 0
#define I40E_GLQF_FD_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_FD_MSK_MASK_SHIFT)
#define I40E_GLQF_FD_MSK_OFFSET_SHIFT 16
#define I40E_GLQF_FD_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_FD_MSK_OFFSET_SHIFT)
#define I40E_GLQF_HASH_INSET(_i, _j) (0x00267600 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
#define I40E_GLQF_HASH_INSET_MAX_INDEX 1
#define I40E_GLQF_HASH_INSET_INSET_SHIFT 0
#define I40E_GLQF_HASH_INSET_INSET_MASK I40E_MASK(0xFFFFFFFF, I40E_GLQF_HASH_INSET_INSET_SHIFT)
#define I40E_GLQF_HASH_MSK(_i, _j) (0x00267A00 + ((_i) * 4 + (_j) * 8)) /* _i=0...1, _j=0...63 */ /* Reset: CORER */
#define I40E_GLQF_HASH_MSK_MAX_INDEX 1
#define I40E_GLQF_HASH_MSK_MASK_SHIFT 0
#define I40E_GLQF_HASH_MSK_MASK_MASK I40E_MASK(0xFFFF, I40E_GLQF_HASH_MSK_MASK_SHIFT)
#define I40E_GLQF_HASH_MSK_OFFSET_SHIFT 16
#define I40E_GLQF_HASH_MSK_OFFSET_MASK I40E_MASK(0x3F, I40E_GLQF_HASH_MSK_OFFSET_SHIFT)
#define I40E_GLQF_ORT(_i) (0x00268900 + ((_i) * 4)) /* _i=0...63 */ /* Reset: CORER */
#define I40E_GLQF_ORT_MAX_INDEX 63
#define I40E_GLQF_ORT_PIT_INDX_SHIFT 0
#define I40E_GLQF_ORT_PIT_INDX_MASK I40E_MASK(0x1F, I40E_GLQF_ORT_PIT_INDX_SHIFT)
#define I40E_GLQF_ORT_FIELD_CNT_SHIFT 5
#define I40E_GLQF_ORT_FIELD_CNT_MASK I40E_MASK(0x3, I40E_GLQF_ORT_FIELD_CNT_SHIFT)
#define I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT 7
#define I40E_GLQF_ORT_FLX_PAYLOAD_MASK I40E_MASK(0x1, I40E_GLQF_ORT_FLX_PAYLOAD_SHIFT)
#define I40E_GLQF_PIT(_i) (0x00268C80 + ((_i) * 4)) /* _i=0...23 */ /* Reset: CORER */
#define I40E_GLQF_PIT_MAX_INDEX 23
#define I40E_GLQF_PIT_SOURCE_OFF_SHIFT 0
#define I40E_GLQF_PIT_SOURCE_OFF_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_SOURCE_OFF_SHIFT)
#define I40E_GLQF_PIT_FSIZE_SHIFT 5
#define I40E_GLQF_PIT_FSIZE_MASK I40E_MASK(0x1F, I40E_GLQF_PIT_FSIZE_SHIFT)
#define I40E_GLQF_PIT_DEST_OFF_SHIFT 10
#define I40E_GLQF_PIT_DEST_OFF_MASK I40E_MASK(0x3F, I40E_GLQF_PIT_DEST_OFF_SHIFT)
#define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */ #define I40E_GLQF_FDEVICTENA(_i) (0x00270384 + ((_i) * 4)) /* _i=0...1 */ /* Reset: CORER */
#define I40E_GLQF_FDEVICTENA_MAX_INDEX 1 #define I40E_GLQF_FDEVICTENA_MAX_INDEX 1
#define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0 #define I40E_GLQF_FDEVICTENA_GLQF_FDEVICTENA_SHIFT 0
......
...@@ -2576,7 +2576,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, ...@@ -2576,7 +2576,7 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
* *
* Returns -EBUSY if a stop is needed, else 0 * Returns -EBUSY if a stop is needed, else 0
**/ **/
static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{ {
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */ /* Memory barrier before checking head and tail */
...@@ -2593,77 +2593,71 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size) ...@@ -2593,77 +2593,71 @@ static inline int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
} }
/** /**
* i40e_maybe_stop_tx - 1st level check for tx stop conditions * __i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
* Returns 0 if stop is not needed
**/
#ifdef I40E_FCOE
inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#else
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
#endif
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
return 0;
return __i40e_maybe_stop_tx(tx_ring, size);
}
/**
* i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer * @skb: send buffer
* @tx_flags: collected send information
* *
* Note: Our HW can't scatter-gather more than 8 fragments to build * Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we * a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb. * need to linearize the skb.
**/ **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) bool __i40e_chk_linearize(struct sk_buff *skb)
{ {
struct skb_frag_struct *frag; const struct skb_frag_struct *frag, *stale;
bool linearize = false; int gso_size, nr_frags, sum;
unsigned int size = 0;
u16 num_frags;
u16 gso_segs;
num_frags = skb_shinfo(skb)->nr_frags; /* check to see if TSO is enabled, if so we may get a repreive */
gso_segs = skb_shinfo(skb)->gso_segs; gso_size = skb_shinfo(skb)->gso_size;
if (unlikely(!gso_size))
return true;
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { /* no need to check if number of frags is less than 8 */
u16 j = 0; nr_frags = skb_shinfo(skb)->nr_frags;
if (nr_frags < I40E_MAX_BUFFER_TXD)
return false;
if (num_frags < (I40E_MAX_BUFFER_TXD)) /* We need to walk through the list and validate that each group
goto linearize_chk_done; * of 6 fragments totals at least gso_size. However we don't need
/* try the simple math, if we have too many frags per segment */ * to perform such validation on the first or last 6 since the first
if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > * 6 cannot inherit any data from a descriptor before them, and the
I40E_MAX_BUFFER_TXD) { * last 6 cannot inherit any data from a descriptor after them.
linearize = true; */
goto linearize_chk_done; nr_frags -= I40E_MAX_BUFFER_TXD - 1;
}
frag = &skb_shinfo(skb)->frags[0]; frag = &skb_shinfo(skb)->frags[0];
/* we might still have more fragments per segment */
do { /* Initialize size to the negative value of gso_size minus 1. We
size += skb_frag_size(frag); * use this as the worst case scenerio in which the frag ahead
frag++; j++; * of us only provides one byte which is why we are limited to 6
if ((size >= skb_shinfo(skb)->gso_size) && * descriptors for a single transmit as the header and previous
(j < I40E_MAX_BUFFER_TXD)) { * fragment are already consuming 2 descriptors.
size = (size % skb_shinfo(skb)->gso_size); */
j = (size) ? 1 : 0; sum = 1 - gso_size;
}
if (j == I40E_MAX_BUFFER_TXD) { /* Add size of frags 1 through 5 to create our initial sum */
linearize = true; sum += skb_frag_size(++frag);
sum += skb_frag_size(++frag);
sum += skb_frag_size(++frag);
sum += skb_frag_size(++frag);
sum += skb_frag_size(++frag);
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
sum += skb_frag_size(++frag);
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
/* use pre-decrement to avoid processing last fragment */
if (!--nr_frags)
break; break;
}
num_frags--; sum -= skb_frag_size(++stale);
} while (num_frags);
} else {
if (num_frags >= I40E_MAX_BUFFER_TXD)
linearize = true;
} }
linearize_chk_done: return false;
return linearize;
} }
/** /**
...@@ -2869,43 +2863,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2869,43 +2863,6 @@ static inline void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
} }
/**
* i40e_xmit_descriptor_count - calculate number of tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
* one descriptor.
**/
#ifdef I40E_FCOE
inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
#else
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
#endif
{
unsigned int f;
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
* + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
return count;
}
/** /**
* i40e_xmit_frame_ring - Sends buffer on Tx ring * i40e_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer * @skb: send buffer
...@@ -2924,14 +2881,30 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2924,14 +2881,30 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
__be16 protocol; __be16 protocol;
u32 td_cmd = 0; u32 td_cmd = 0;
u8 hdr_len = 0; u8 hdr_len = 0;
int tso, count;
int tsyn; int tsyn;
int tso;
/* prefetch the data, we'll need it later */ /* prefetch the data, we'll need it later */
prefetch(skb->data); prefetch(skb->data);
if (0 == i40e_xmit_descriptor_count(skb, tx_ring)) count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
count = TXD_USE_COUNT(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
* + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
}
/* prepare the xmit flags */ /* prepare the xmit flags */
if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) if (i40e_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
...@@ -2956,27 +2929,22 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2956,27 +2929,22 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso) else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO; tx_flags |= I40E_TX_FLAGS_TSO;
/* Always offload the checksum, since it's in the data descriptor */
tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
if (tso < 0)
goto out_drop;
tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss); tsyn = i40e_tsyn(tx_ring, skb, tx_flags, &cd_type_cmd_tso_mss);
if (tsyn) if (tsyn)
tx_flags |= I40E_TX_FLAGS_TSYN; tx_flags |= I40E_TX_FLAGS_TSYN;
if (i40e_chk_linearize(skb, tx_flags)) {
if (skb_linearize(skb))
goto out_drop;
tx_ring->tx_stats.tx_linearize++;
}
skb_tx_timestamp(skb); skb_tx_timestamp(skb);
/* always enable CRC insertion offload */ /* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC; td_cmd |= I40E_TX_DESC_CMD_ICRC;
/* Always offload the checksum, since it's in the data descriptor */
tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling);
if (tso < 0)
goto out_drop;
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2); cd_tunneling, cd_l2tag2);
......
...@@ -331,13 +331,13 @@ int i40e_napi_poll(struct napi_struct *napi, int budget); ...@@ -331,13 +331,13 @@ int i40e_napi_poll(struct napi_struct *napi, int budget);
void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, void i40e_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
struct i40e_tx_buffer *first, u32 tx_flags, struct i40e_tx_buffer *first, u32 tx_flags,
const u8 hdr_len, u32 td_cmd, u32 td_offset); const u8 hdr_len, u32 td_cmd, u32 td_offset);
int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
int i40e_xmit_descriptor_count(struct sk_buff *skb, struct i40e_ring *tx_ring);
int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, int i40e_tx_prepare_vlan_flags(struct sk_buff *skb,
struct i40e_ring *tx_ring, u32 *flags); struct i40e_ring *tx_ring, u32 *flags);
#endif #endif
void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); void i40e_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw); u32 i40e_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40e_chk_linearize(struct sk_buff *skb);
/** /**
* i40e_get_head - Retrieve head from head writeback * i40e_get_head - Retrieve head from head writeback
...@@ -352,4 +352,63 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) ...@@ -352,4 +352,63 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head); return le32_to_cpu(*(volatile __le32 *)head);
} }
/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
* one descriptor.
**/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
for (;;) {
count += TXD_USE_COUNT(size);
if (!nr_frags--)
break;
size = skb_frag_size(frag++);
}
return count;
}
/**
* i40e_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
* Returns 0 if stop is not needed
**/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
return 0;
return __i40e_maybe_stop_tx(tx_ring, size);
}
/**
* i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer
* @count: number of buffers used
*
* Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb.
**/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
/* we can only support up to 8 data buffers for a single send */
if (likely(count <= I40E_MAX_BUFFER_TXD))
return false;
return __i40e_chk_linearize(skb);
}
#endif /* _I40E_TXRX_H_ */ #endif /* _I40E_TXRX_H_ */
...@@ -90,6 +90,22 @@ enum i40e_debug_mask { ...@@ -90,6 +90,22 @@ enum i40e_debug_mask {
I40E_DEBUG_ALL = 0xFFFFFFFF I40E_DEBUG_ALL = 0xFFFFFFFF
}; };
#define I40E_MDIO_STCODE 0
#define I40E_MDIO_OPCODE_ADDRESS 0
#define I40E_MDIO_OPCODE_WRITE I40E_MASK(1, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_READ_INC_ADDR I40E_MASK(2, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_MDIO_OPCODE_READ I40E_MASK(3, \
I40E_GLGEN_MSCA_OPCODE_SHIFT)
#define I40E_PHY_COM_REG_PAGE 0x1E
#define I40E_PHY_LED_LINK_MODE_MASK 0xF0
#define I40E_PHY_LED_MANUAL_ON 0x100
#define I40E_PHY_LED_PROV_REG_1 0xC430
#define I40E_PHY_LED_MODE_MASK 0xFFFF
#define I40E_PHY_LED_MODE_ORIG 0x80000000
/* These are structs for managing the hardware information and the operations. /* These are structs for managing the hardware information and the operations.
* The structures of function pointers are filled out at init time when we * The structures of function pointers are filled out at init time when we
* know for sure exactly which hardware we're working with. This gives us the * know for sure exactly which hardware we're working with. This gives us the
......
...@@ -602,7 +602,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) ...@@ -602,7 +602,7 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
* that VF queues be mapped using this method, even when they are * that VF queues be mapped using this method, even when they are
* contiguous in real life * contiguous in real life
*/ */
wr32(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id), i40e_write_rx_ctl(hw, I40E_VSILAN_QBASE(vf->lan_vsi_id),
I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK); I40E_VSILAN_QBASE_VSIQTABLE_ENA_MASK);
/* enable VF vplan_qtable mappings */ /* enable VF vplan_qtable mappings */
...@@ -630,7 +630,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf) ...@@ -630,7 +630,8 @@ static void i40e_enable_vf_mappings(struct i40e_vf *vf)
(j * 2) + 1); (j * 2) + 1);
reg |= qid << 16; reg |= qid << 16;
} }
wr32(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id), reg); i40e_write_rx_ctl(hw, I40E_VSILAN_QTABLE(j, vf->lan_vsi_id),
reg);
} }
i40e_flush(hw); i40e_flush(hw);
...@@ -2202,6 +2203,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev, ...@@ -2202,6 +2203,8 @@ int i40e_ndo_set_vf_port_vlan(struct net_device *netdev,
* and then reloading the VF driver. * and then reloading the VF driver.
*/ */
i40e_vc_disable_vf(pf, vf); i40e_vc_disable_vf(pf, vf);
/* During reset the VF got a new VSI, so refresh the pointer. */
vsi = pf->vsi[vf->lan_vsi_idx];
} }
/* Check for condition where there was already a port VLAN ID /* Check for condition where there was already a port VLAN ID
......
...@@ -146,6 +146,8 @@ enum i40e_admin_queue_opc { ...@@ -146,6 +146,8 @@ enum i40e_admin_queue_opc {
i40e_aqc_opc_set_port_parameters = 0x0203, i40e_aqc_opc_set_port_parameters = 0x0203,
i40e_aqc_opc_get_switch_resource_alloc = 0x0204, i40e_aqc_opc_get_switch_resource_alloc = 0x0204,
i40e_aqc_opc_set_switch_config = 0x0205, i40e_aqc_opc_set_switch_config = 0x0205,
i40e_aqc_opc_rx_ctl_reg_read = 0x0206,
i40e_aqc_opc_rx_ctl_reg_write = 0x0207,
i40e_aqc_opc_add_vsi = 0x0210, i40e_aqc_opc_add_vsi = 0x0210,
i40e_aqc_opc_update_vsi_parameters = 0x0211, i40e_aqc_opc_update_vsi_parameters = 0x0211,
...@@ -693,6 +695,20 @@ struct i40e_aqc_set_switch_config { ...@@ -693,6 +695,20 @@ struct i40e_aqc_set_switch_config {
I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config); I40E_CHECK_CMD_LENGTH(i40e_aqc_set_switch_config);
/* Read Receive control registers (direct 0x0206)
* Write Receive control registers (direct 0x0207)
* used for accessing Rx control registers that can be
* slow and need special handling when under high Rx load
*/
struct i40e_aqc_rx_ctl_reg_read_write {
__le32 reserved1;
__le32 address;
__le32 reserved2;
__le32 value;
};
I40E_CHECK_CMD_LENGTH(i40e_aqc_rx_ctl_reg_read_write);
/* Add VSI (indirect 0x0210) /* Add VSI (indirect 0x0210)
* this indirect command uses struct i40e_aqc_vsi_properties_data * this indirect command uses struct i40e_aqc_vsi_properties_data
* as the indirect buffer (128 bytes) * as the indirect buffer (128 bytes)
......
...@@ -903,6 +903,131 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = { ...@@ -903,6 +903,131 @@ struct i40e_rx_ptype_decoded i40evf_ptype_lookup[] = {
I40E_PTT_UNUSED_ENTRY(255) I40E_PTT_UNUSED_ENTRY(255)
}; };
/**
* i40evf_aq_rx_ctl_read_register - use FW to read from an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: ptr to register value
* @cmd_details: pointer to command details structure or NULL
*
* Use the firmware to read the Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd_resp =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
i40e_status status;
if (!reg_val)
return I40E_ERR_PARAM;
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_rx_ctl_reg_read);
cmd_resp->address = cpu_to_le32(reg_addr);
status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
if (status == 0)
*reg_val = le32_to_cpu(cmd_resp->value);
return status;
}
/**
* i40evf_read_rx_ctl - read from an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
**/
u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr)
{
i40e_status status = 0;
bool use_register;
int retry = 5;
u32 val = 0;
use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_read_register(hw, reg_addr,
&val, NULL);
if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
}
}
/* if the AQ access failed, try the old-fashioned way */
if (status || use_register)
val = rd32(hw, reg_addr);
return val;
}
/**
* i40evf_aq_rx_ctl_write_register - use FW to write to an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: register value
* @cmd_details: pointer to command details structure or NULL
*
* Use the firmware to write to an Rx control register,
* especially useful if the Rx unit is under heavy pressure
**/
i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details)
{
struct i40e_aq_desc desc;
struct i40e_aqc_rx_ctl_reg_read_write *cmd =
(struct i40e_aqc_rx_ctl_reg_read_write *)&desc.params.raw;
i40e_status status;
i40evf_fill_default_direct_cmd_desc(&desc,
i40e_aqc_opc_rx_ctl_reg_write);
cmd->address = cpu_to_le32(reg_addr);
cmd->value = cpu_to_le32(reg_val);
status = i40evf_asq_send_command(hw, &desc, NULL, 0, cmd_details);
return status;
}
/**
* i40evf_write_rx_ctl - write to an Rx control register
* @hw: pointer to the hw struct
* @reg_addr: register address
* @reg_val: register value
**/
void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val)
{
i40e_status status = 0;
bool use_register;
int retry = 5;
use_register = (hw->aq.api_maj_ver == 1) && (hw->aq.api_min_ver < 5);
if (!use_register) {
do_retry:
status = i40evf_aq_rx_ctl_write_register(hw, reg_addr,
reg_val, NULL);
if (hw->aq.asq_last_status == I40E_AQ_RC_EAGAIN && retry) {
usleep_range(1000, 2000);
retry--;
goto do_retry;
}
}
/* if the AQ access failed, try the old-fashioned way */
if (status || use_register)
wr32(hw, reg_addr, reg_val);
}
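Both wrappers above follow the same pattern: prefer the admin queue when the firmware API is new enough (1.5 or later), retry a few times if the firmware answers EAGAIN, and fall back to a plain MMIO access if the AQ path is unavailable or keeps failing. A standalone sketch of that control flow is below; the stub function, its return codes, and the fake register value are assumptions for illustration, not driver API.

#include <stdbool.h>
#include <stdio.h>

#define AQ_RC_EAGAIN	1		/* stand-in for I40E_AQ_RC_EAGAIN */

/* stubbed firmware call: pretend the first two attempts come back busy */
static int fake_aq_read(unsigned int reg, unsigned int *val, int *last_status)
{
	static int busy_left = 2;

	(void)reg;
	if (busy_left-- > 0) {
		*last_status = AQ_RC_EAGAIN;
		return -1;
	}
	*last_status = 0;
	*val = 0xdeadbeef;		/* pretend register contents */
	return 0;
}

static unsigned int read_rx_ctl(unsigned int reg, bool aq_supported)
{
	unsigned int val = 0;
	int status = 0, last_status = 0, retry = 5;

	if (aq_supported) {
		do {
			status = fake_aq_read(reg, &val, &last_status);
			if (last_status != AQ_RC_EAGAIN || !retry--)
				break;
			/* the driver sleeps 1-2 ms here before retrying */
		} while (1);
	}

	/* if the AQ access failed (or is not supported), fall back to MMIO;
	 * this is rd32(hw, reg) in the driver, a constant here
	 */
	if (status || !aq_supported)
		val = 0;

	return val;
}

int main(void)
{
	printf("0x%x\n", read_rx_ctl(0x1234, true));
	return 0;
}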
/** /**
* i40e_aq_send_msg_to_pf * i40e_aq_send_msg_to_pf
* @hw: pointer to the hardware structure * @hw: pointer to the hardware structure
......
...@@ -103,4 +103,19 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw, ...@@ -103,4 +103,19 @@ i40e_status i40e_aq_add_rem_control_packet_filter(struct i40e_hw *hw,
struct i40e_asq_cmd_details *cmd_details); struct i40e_asq_cmd_details *cmd_details);
void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw, void i40e_add_filter_to_drop_tx_flow_control_frames(struct i40e_hw *hw,
u16 vsi_seid); u16 vsi_seid);
i40e_status i40evf_aq_rx_ctl_read_register(struct i40e_hw *hw,
u32 reg_addr, u32 *reg_val,
struct i40e_asq_cmd_details *cmd_details);
u32 i40evf_read_rx_ctl(struct i40e_hw *hw, u32 reg_addr);
i40e_status i40evf_aq_rx_ctl_write_register(struct i40e_hw *hw,
u32 reg_addr, u32 reg_val,
struct i40e_asq_cmd_details *cmd_details);
void i40evf_write_rx_ctl(struct i40e_hw *hw, u32 reg_addr, u32 reg_val);
i40e_status i40e_read_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 *value);
i40e_status i40e_write_phy_register(struct i40e_hw *hw, u8 page,
u16 reg, u8 phy_addr, u16 value);
u8 i40e_get_phy_address(struct i40e_hw *hw, u8 dev_num);
i40e_status i40e_blink_phy_link_led(struct i40e_hw *hw,
u32 time, u32 interval);
#endif /* _I40E_PROTOTYPE_H_ */ #endif /* _I40E_PROTOTYPE_H_ */
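The new PHY prototypes above exist because, on parts wired with a 10GBASE-T PHY, the LEDs are controlled through MDIO rather than the MAC LED registers (see the commit message). The blink helper boils down to a periodic read-modify-write of the LED provisioning register that flips the manual-on bit. A standalone sketch of that toggle, reusing the I40E_PHY_LED_MANUAL_ON value from the register definitions earlier in this patch set and a plain variable in place of the i40e_read_phy_register()/i40e_write_phy_register() accessors, might look like this (illustrative only; the real helper also saves and restores the original LED mode):

#include <stdint.h>
#include <stdio.h>

#define I40E_PHY_LED_MANUAL_ON	0x100	/* from the register definitions above */

/* stands in for the LED provisioning register reached over MDIO */
static uint16_t fake_led_reg;

int main(void)
{
	int i;

	/* toggle the manual-on control a few times; in the driver each pass
	 * would read the register, flip the bit, write it back, and sleep for
	 * the requested interval
	 */
	for (i = 0; i < 6; i++) {
		if (fake_led_reg & I40E_PHY_LED_MANUAL_ON)
			fake_led_reg = 0;
		else
			fake_led_reg = I40E_PHY_LED_MANUAL_ON;

		printf("LED %s\n",
		       (fake_led_reg & I40E_PHY_LED_MANUAL_ON) ? "on" : "off");
	}
	return 0;
}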
...@@ -1796,59 +1796,71 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring, ...@@ -1796,59 +1796,71 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
} }
/** /**
* i40e_chk_linearize - Check if there are more than 8 fragments per packet * __i40evf_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer * @skb: send buffer
* @tx_flags: collected send information
* *
* Note: Our HW can't scatter-gather more than 8 fragments to build * Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we * a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb. * need to linearize the skb.
**/ **/
static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) bool __i40evf_chk_linearize(struct sk_buff *skb)
{ {
struct skb_frag_struct *frag; const struct skb_frag_struct *frag, *stale;
bool linearize = false; int gso_size, nr_frags, sum;
unsigned int size = 0;
u16 num_frags;
u16 gso_segs;
num_frags = skb_shinfo(skb)->nr_frags; /* check to see if TSO is enabled, if so we may get a reprieve */
gso_segs = skb_shinfo(skb)->gso_segs; gso_size = skb_shinfo(skb)->gso_size;
if (unlikely(!gso_size))
return true;
if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) { /* no need to check if number of frags is less than 8 */
u16 j = 0; nr_frags = skb_shinfo(skb)->nr_frags;
if (nr_frags < I40E_MAX_BUFFER_TXD)
return false;
if (num_frags < (I40E_MAX_BUFFER_TXD)) /* We need to walk through the list and validate that each group
goto linearize_chk_done; * of 6 fragments totals at least gso_size. However we don't need
/* try the simple math, if we have too many frags per segment */ * to perform such validation on the first or last 6 since the first
if (DIV_ROUND_UP((num_frags + gso_segs), gso_segs) > * 6 cannot inherit any data from a descriptor before them, and the
I40E_MAX_BUFFER_TXD) { * last 6 cannot inherit any data from a descriptor after them.
linearize = true; */
goto linearize_chk_done; nr_frags -= I40E_MAX_BUFFER_TXD - 1;
}
frag = &skb_shinfo(skb)->frags[0]; frag = &skb_shinfo(skb)->frags[0];
/* we might still have more fragments per segment */
do { /* Initialize sum to the negative value of gso_size minus 1. We
size += skb_frag_size(frag); * use this as the worst case scenario in which the frag ahead
frag++; j++; * of us only provides one byte which is why we are limited to 6
if ((size >= skb_shinfo(skb)->gso_size) && * descriptors for a single transmit as the header and previous
(j < I40E_MAX_BUFFER_TXD)) { * fragment are already consuming 2 descriptors.
size = (size % skb_shinfo(skb)->gso_size); */
j = (size) ? 1 : 0; sum = 1 - gso_size;
}
if (j == I40E_MAX_BUFFER_TXD) { /* Add size of frags 1 through 5 to create our initial sum */
linearize = true; sum += skb_frag_size(++frag);
sum += skb_frag_size(++frag);
sum += skb_frag_size(++frag);
sum += skb_frag_size(++frag);
sum += skb_frag_size(++frag);
/* Walk through fragments adding latest fragment, testing it, and
* then removing stale fragments from the sum.
*/
stale = &skb_shinfo(skb)->frags[0];
for (;;) {
sum += skb_frag_size(++frag);
/* if sum is negative we failed to make sufficient progress */
if (sum < 0)
return true;
/* use pre-decrement to avoid processing last fragment */
if (!--nr_frags)
break; break;
}
num_frags--; sum -= skb_frag_size(++stale);
} while (num_frags);
} else {
if (num_frags >= I40E_MAX_BUFFER_TXD)
linearize = true;
} }
linearize_chk_done: return false;
return linearize;
} }
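The rewritten check above replaces the old per-segment modular arithmetic with a sliding window: seed sum with 1 - gso_size, add the sizes of frags 1 through 5, then walk forward one fragment at a time, adding the newest fragment and dropping the stalest; if the running total ever goes negative, no group of descriptors can cover a full gso_size worth of data and the skb has to be linearized. A standalone sketch of the same walk over a plain array of fragment sizes (the constants and array values here are made up for the example):

#include <stdbool.h>
#include <stdio.h>

#define MAX_BUFFER_TXD	8	/* HW limit: 8 data buffers per wire packet */

/* Mirrors the windowed check in __i40evf_chk_linearize(): returns true when a
 * TSO skb split into the given fragment sizes would need to be linearized.
 */
static bool needs_linearize(const int *frag_size, int nr_frags, int gso_size)
{
	int sum, i, stale = 0;

	if (gso_size == 0)		/* not TSO: too many frags, no reprieve */
		return true;
	if (nr_frags < MAX_BUFFER_TXD)	/* 7 or fewer frags always fit */
		return false;

	/* worst case: the fragment ahead of us contributes a single byte */
	sum = 1 - gso_size;

	/* frags 1..5 form the initial window */
	for (i = 1; i <= 5; i++)
		sum += frag_size[i];

	/* slide the window; like the driver, the final fragment is skipped */
	for (i = 6; ; i++) {
		sum += frag_size[i];
		if (sum < 0)
			return true;
		if (i == nr_frags - 2)
			break;
		sum -= frag_size[++stale];
	}
	return false;
}

int main(void)
{
	/* ten 256-byte frags with gso_size 4096: no window can cover a full
	 * segment, so this skb would be linearized
	 */
	int tiny[10] = { 256, 256, 256, 256, 256, 256, 256, 256, 256, 256 };
	/* ten 4 KB frags keep pace with the same gso_size */
	int big[10] = { 4096, 4096, 4096, 4096, 4096,
			4096, 4096, 4096, 4096, 4096 };

	printf("tiny frags: %s\n",
	       needs_linearize(tiny, 10, 4096) ? "linearize" : "ok");
	printf("big frags: %s\n",
	       needs_linearize(big, 10, 4096) ? "linearize" : "ok");
	return 0;
}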
/** /**
...@@ -1858,7 +1870,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags) ...@@ -1858,7 +1870,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
* *
* Returns -EBUSY if a stop is needed, else 0 * Returns -EBUSY if a stop is needed, else 0
**/ **/
static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{ {
netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index);
/* Memory barrier before checking head and tail */ /* Memory barrier before checking head and tail */
...@@ -1874,20 +1886,6 @@ static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size) ...@@ -1874,20 +1886,6 @@ static inline int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
return 0; return 0;
} }
/**
* i40evf_maybe_stop_tx - 1st level check for tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
* Returns 0 if stop is not needed
**/
static inline int i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
return 0;
return __i40evf_maybe_stop_tx(tx_ring, size);
}
/** /**
* i40evf_tx_map - Build the Tx descriptor * i40evf_tx_map - Build the Tx descriptor
* @tx_ring: ring to send buffer on * @tx_ring: ring to send buffer on
...@@ -2003,7 +2001,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2003,7 +2001,7 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev, netdev_tx_sent_queue(netdev_get_tx_queue(tx_ring->netdev,
tx_ring->queue_index), tx_ring->queue_index),
first->bytecount); first->bytecount);
i40evf_maybe_stop_tx(tx_ring, DESC_NEEDED); i40e_maybe_stop_tx(tx_ring, DESC_NEEDED);
/* Algorithm to optimize tail and RS bit setting: /* Algorithm to optimize tail and RS bit setting:
* if xmit_more is supported * if xmit_more is supported
...@@ -2085,38 +2083,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb, ...@@ -2085,38 +2083,6 @@ static inline void i40evf_tx_map(struct i40e_ring *tx_ring, struct sk_buff *skb,
tx_ring->next_to_use = i; tx_ring->next_to_use = i;
} }
/**
* i40evf_xmit_descriptor_count - calculate number of tx descriptors needed
* @skb: send buffer
* @tx_ring: ring to send buffer on
*
* Returns number of data descriptors needed for this skb. Returns 0 to indicate
* there is not enough descriptors available in this ring since we need at least
* one descriptor.
**/
static inline int i40evf_xmit_descriptor_count(struct sk_buff *skb,
struct i40e_ring *tx_ring)
{
unsigned int f;
int count = 0;
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
* + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
for (f = 0; f < skb_shinfo(skb)->nr_frags; f++)
count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size);
count += TXD_USE_COUNT(skb_headlen(skb));
if (i40evf_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return 0;
}
return count;
}
/** /**
* i40e_xmit_frame_ring - Sends buffer on Tx ring * i40e_xmit_frame_ring - Sends buffer on Tx ring
* @skb: send buffer * @skb: send buffer
...@@ -2135,13 +2101,29 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2135,13 +2101,29 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
__be16 protocol; __be16 protocol;
u32 td_cmd = 0; u32 td_cmd = 0;
u8 hdr_len = 0; u8 hdr_len = 0;
int tso; int tso, count;
/* prefetch the data, we'll need it later */ /* prefetch the data, we'll need it later */
prefetch(skb->data); prefetch(skb->data);
if (0 == i40evf_xmit_descriptor_count(skb, tx_ring)) count = i40e_xmit_descriptor_count(skb);
if (i40e_chk_linearize(skb, count)) {
if (__skb_linearize(skb))
goto out_drop;
count = TXD_USE_COUNT(skb->len);
tx_ring->tx_stats.tx_linearize++;
}
/* need: 1 descriptor per page * PAGE_SIZE/I40E_MAX_DATA_PER_TXD,
* + 1 desc for skb_head_len/I40E_MAX_DATA_PER_TXD,
* + 4 desc gap to avoid the cache line where head is,
* + 1 desc for context descriptor,
* otherwise try next time
*/
if (i40e_maybe_stop_tx(tx_ring, count + 4 + 1)) {
tx_ring->tx_stats.tx_busy++;
return NETDEV_TX_BUSY; return NETDEV_TX_BUSY;
}
/* prepare the xmit flags */ /* prepare the xmit flags */
if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags)) if (i40evf_tx_prepare_vlan_flags(skb, tx_ring, &tx_flags))
...@@ -2166,22 +2148,17 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb, ...@@ -2166,22 +2148,17 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
else if (tso) else if (tso)
tx_flags |= I40E_TX_FLAGS_TSO; tx_flags |= I40E_TX_FLAGS_TSO;
if (i40e_chk_linearize(skb, tx_flags)) {
if (skb_linearize(skb))
goto out_drop;
tx_ring->tx_stats.tx_linearize++;
}
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
/* Always offload the checksum, since it's in the data descriptor */ /* Always offload the checksum, since it's in the data descriptor */
tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset, tso = i40e_tx_enable_csum(skb, &tx_flags, &td_cmd, &td_offset,
tx_ring, &cd_tunneling); tx_ring, &cd_tunneling);
if (tso < 0) if (tso < 0)
goto out_drop; goto out_drop;
skb_tx_timestamp(skb);
/* always enable CRC insertion offload */
td_cmd |= I40E_TX_DESC_CMD_ICRC;
i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss, i40e_create_tx_ctx(tx_ring, cd_type_cmd_tso_mss,
cd_tunneling, cd_l2tag2); cd_tunneling, cd_l2tag2);
......
...@@ -326,6 +326,8 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring); ...@@ -326,6 +326,8 @@ void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget); int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector); void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw); u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);
/** /**
* i40e_get_head - Retrieve head from head writeback * i40e_get_head - Retrieve head from head writeback
...@@ -340,4 +342,63 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring) ...@@ -340,4 +342,63 @@ static inline u32 i40e_get_head(struct i40e_ring *tx_ring)
return le32_to_cpu(*(volatile __le32 *)head); return le32_to_cpu(*(volatile __le32 *)head);
} }
/**
* i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
* @skb: send buffer
*
* Returns the worst-case number of data descriptors needed for this skb.
**/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
int count = 0, size = skb_headlen(skb);
for (;;) {
count += TXD_USE_COUNT(size);
if (!nr_frags--)
break;
size = skb_frag_size(frag++);
}
return count;
}
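The helper above just accumulates a worst-case data-descriptor count: one TXD_USE_COUNT() worth for the linear part of the skb and one per page fragment, where TXD_USE_COUNT() is a round-up division by the per-descriptor data limit. A standalone sketch of the arithmetic follows; the 16 KB limit is an assumption standing in for I40E_MAX_DATA_PER_TXD, and the skb geometry is made up for the example.

#include <stdio.h>

#define MAX_DATA_PER_TXD	(16 * 1024)	/* assumed stand-in for I40E_MAX_DATA_PER_TXD */
#define TXD_USE_COUNT(s)	(((s) + MAX_DATA_PER_TXD - 1) / MAX_DATA_PER_TXD)

int main(void)
{
	/* e.g. a TSO skb with a 254-byte linear header and three 32 KB frags */
	int headlen = 254;
	int frag_size[] = { 32768, 32768, 32768 };
	int nr_frags = 3, count, f;

	count = TXD_USE_COUNT(headlen);			/* 1 for the header */
	for (f = 0; f < nr_frags; f++)
		count += TXD_USE_COUNT(frag_size[f]);	/* 2 per 32 KB frag */

	/* the caller in i40e_xmit_frame_ring() then asks for count + 4 + 1
	 * free descriptors: a 4-descriptor gap to avoid the cache line where
	 * head is, plus 1 for the context descriptor
	 */
	printf("data descriptors: %d, ring space needed: %d\n",
	       count, count + 4 + 1);
	return 0;
}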
/**
* i40e_maybe_stop_tx - 1st level check for Tx stop conditions
* @tx_ring: the ring to be checked
* @size: the size buffer we want to assure is available
*
* Returns 0 if stop is not needed
**/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
return 0;
return __i40evf_maybe_stop_tx(tx_ring, size);
}
/**
* i40e_chk_linearize - Check if there are more than 8 fragments per packet
* @skb: send buffer
* @count: number of buffers used
*
* Note: Our HW can't scatter-gather more than 8 fragments to build
* a packet on the wire and so we need to figure out the cases where we
* need to linearize the skb.
**/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
/* we can only support up to 8 data buffers for a single send */
if (likely(count <= I40E_MAX_BUFFER_TXD))
return false;
return __i40evf_chk_linearize(skb);
}
#endif /* _I40E_TXRX_H_ */ #endif /* _I40E_TXRX_H_ */
...@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] = ...@@ -38,7 +38,7 @@ static const char i40evf_driver_string[] =
#define DRV_VERSION_MAJOR 1 #define DRV_VERSION_MAJOR 1
#define DRV_VERSION_MINOR 4 #define DRV_VERSION_MINOR 4
#define DRV_VERSION_BUILD 11 #define DRV_VERSION_BUILD 15
#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \ #define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
__stringify(DRV_VERSION_MINOR) "." \ __stringify(DRV_VERSION_MINOR) "." \
__stringify(DRV_VERSION_BUILD) \ __stringify(DRV_VERSION_BUILD) \
......