Commit 41837ca9 authored by Emmanuel Grumbach

iwlwifi: pcie: allow pretending to have Tx CSUM for debug

Allow configuring the driver to pretend to have TX CSUM
offload support. This will be useful for testing the TSO flows
that will come in subsequent patches.
This configuration is disabled by default.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
parent cb2f8277
@@ -478,6 +478,7 @@ struct iwl_hcmd_arr {
  *	in DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
  * @wide_cmd_header: firmware supports wide host command header
+ * @sw_csum_tx: transport should compute the TCP checksum
  * @command_groups: array of command groups, each member is an array of the
  *	commands in the group; for debugging only
  * @command_groups_size: number of command groups, to avoid illegal access
@@ -497,6 +498,7 @@ struct iwl_trans_config {
 	bool bc_table_dword;
 	bool scd_set_active;
 	bool wide_cmd_header;
+	bool sw_csum_tx;
 	const struct iwl_hcmd_arr *command_groups;
 	int command_groups_size;
...
@@ -106,6 +106,7 @@
 #define IWL_MVM_RS_RSSI_BASED_INIT_RATE		0
 #define IWL_MVM_RS_80_20_FAR_RANGE_TWEAK	1
 #define IWL_MVM_TOF_IS_RESPONDER		0
+#define IWL_MVM_SW_TX_CSUM_OFFLOAD		0
 #define IWL_MVM_RS_NUM_TRY_BEFORE_ANT_TOGGLE	1
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE	2
 #define IWL_MVM_RS_HT_VHT_RETRIES_PER_RATE_TW	1
...
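The knob is compile-time only and ships disabled. To exercise the software-checksum path in a debug build, flip the constant in the mvm constants header patched above and rebuild the driver (the diff view omits file names, so the location is inferred from the hunk context):

#define IWL_MVM_SW_TX_CSUM_OFFLOAD		1	/* debug: fake TX CSUM offload */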
@@ -667,6 +667,9 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
 	if (!iwl_mvm_is_csum_supported(mvm))
 		hw->netdev_features &= ~NETIF_F_RXCSUM;
 
+	if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
+		hw->netdev_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;
+
 	ret = ieee80211_register_hw(mvm->hw);
 	if (ret)
 		iwl_mvm_leds_exit(mvm);
...
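Advertising NETIF_F_IP_CSUM and NETIF_F_IPV6_CSUM tells the networking core that it may hand the driver packets whose L4 checksum is still pending. A minimal sketch of the contract the tx path further below relies on (the helper is illustrative and not part of this patch; the sk_buff fields are the real ones):

#include <linux/skbuff.h>

/* Illustrative only, not from this patch: a packet whose checksum the
 * stack deferred to the "device" is flagged like this; csum_start marks
 * where checksumming begins and csum_offset says where, relative to
 * csum_start, the folded 16-bit result must be stored. */
static bool iwl_skb_needs_sw_csum(const struct sk_buff *skb)
{
	return skb->ip_summed == CHECKSUM_PARTIAL;
}

Packets arriving with any other ip_summed value already carry a complete checksum and must be left alone.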
@@ -541,6 +541,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	trans_cfg.scd_set_active = true;
 
 	trans_cfg.sdio_adma_addr = fw->sdio_adma_addr;
+	trans_cfg.sw_csum_tx = IWL_MVM_SW_TX_CSUM_OFFLOAD;
 
 	/* Set a short watchdog for the command queue */
 	trans_cfg.cmd_q_wdg_timeout =
...
@@ -307,6 +307,8 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
  * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
  * @scd_set_active: should the transport configure the SCD for HCMD queue
  * @wide_cmd_header: true when ucode supports wide command header format
+ * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
+ *	frame.
  * @rx_page_order: page order for receive buffer size
  * @reg_lock: protect hw register access
  * @mutex: to protect stop_device / start_fw / start_hw
@@ -361,6 +363,7 @@ struct iwl_trans_pcie {
 	bool bc_table_dword;
 	bool scd_set_active;
 	bool wide_cmd_header;
+	bool sw_csum_tx;
 	u32 rx_page_order;
 
 	/*protect hw register */
...
@@ -1442,6 +1442,7 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 	trans_pcie->wide_cmd_header = trans_cfg->wide_cmd_header;
 	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
 	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
+	trans_pcie->sw_csum_tx = trans_cfg->sw_csum_tx;
 	trans->command_groups = trans_cfg->command_groups;
 	trans->command_groups_size = trans_cfg->command_groups_size;
...
@@ -1823,6 +1823,19 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
 		      "TX on unused queue %d\n", txq_id))
 		return -EINVAL;
 
+	if (unlikely(trans_pcie->sw_csum_tx &&
+		     skb->ip_summed == CHECKSUM_PARTIAL)) {
+		int offs = skb_checksum_start_offset(skb);
+		int csum_offs = offs + skb->csum_offset;
+		__wsum csum;
+
+		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
+			return -1;
+
+		csum = skb_checksum(skb, offs, skb->len - offs, 0);
+		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
+	}
+
 	if (skb_is_nonlinear(skb) &&
 	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS &&
 	    __skb_linearize(skb))
...
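skb_checksum() accumulates a 32-bit one's-complement sum over the frame from the checksum start, and csum_fold() collapses it into the final 16-bit Internet checksum (RFC 1071). A self-contained userspace sketch of the same arithmetic, under our own names rather than the kernel helpers:

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

/* RFC 1071 Internet checksum: one's-complement sum of 16-bit words,
 * carries folded back in, result complemented -- the math performed by
 * skb_checksum() followed by csum_fold(). */
static uint16_t inet_csum_sketch(const uint8_t *buf, size_t len)
{
	uint32_t sum = 0;

	while (len > 1) {		/* whole 16-bit words */
		sum += ((uint32_t)buf[0] << 8) | buf[1];
		buf += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)buf[0] << 8;
	while (sum >> 16)		/* fold carries back into low word */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* Worked example from RFC 1071: these eight bytes checksum
	 * to 0x220d. */
	const uint8_t pkt[] = { 0x00, 0x01, 0xf2, 0x03,
				0xf4, 0xf5, 0xf6, 0xf7 };
	assert(inet_csum_sketch(pkt, sizeof(pkt)) == 0x220d);
	return 0;
}

The skb_ensure_writable() call before the store is the notable design point: the checksum bytes may live in data shared with a clone, so the patch copies (or bails out with an error) rather than scribble on another user's buffer.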