Commit f3e40bec authored by Kalle Valo's avatar Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2018-08-02' of...

Merge tag 'iwlwifi-next-for-kalle-2018-08-02' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

More iwlwifi patches for 4.19

* New PCI IDs for 22000 and 22560;
* Some fixes and an improvement in debug dumps and recording;
* Remove some old dead code;
* Fix compilation when IPv6 is not enabled;
* Continued work on 22560 devices;
parents 24ebfcbd 45904e7e
@@ -14,7 +14,6 @@ iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
 iwlwifi-objs += iwl-trans.o
 iwlwifi-objs += fw/notif-wait.o
 iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o
-iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o
 iwlwifi-$(CONFIG_ACPI) += fw/acpi.o
 iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o
...
@@ -59,7 +59,7 @@
 #define IWL_22000_UCODE_API_MAX	38
 /* Lowest firmware API version supported */
-#define IWL_22000_UCODE_API_MIN	24
+#define IWL_22000_UCODE_API_MIN	39
 /* NVM versions */
 #define IWL_22000_NVM_VERSION		0x0a1d
...
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -187,20 +189,4 @@ struct iwl_card_state_notif {
 	__le32 flags;
 } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */
 
-/**
- * struct iwl_fseq_ver_mismatch_ntf - Notification about version
- *
- * This notification does not have a direct impact on the init flow.
- * It means that another core (not WiFi) has initiated the FSEQ flow
- * and updated the FSEQ version. The driver only prints an error when
- * this occurs.
- *
- * @aux_read_fseq_ver: auxiliary read FSEQ version
- * @wifi_fseq_ver: FSEQ version (embedded in WiFi)
- */
-struct iwl_fseq_ver_mismatch_ntf {
-	__le32 aux_read_fseq_ver;
-	__le32 wifi_fseq_ver;
-} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */
-
 #endif /* __iwl_fw_api_alive_h__ */
@@ -647,13 +647,6 @@ enum iwl_system_subcmd_ids {
 	 * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd
 	 */
 	INIT_EXTENDED_CFG_CMD = 0x03,
-
-	/**
-	 * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version
-	 *	mismatch during init. The format is specified in
-	 *	&struct iwl_fseq_ver_mismatch_ntf.
-	 */
-	FSEQ_VER_MISMATCH_NTF = 0xFF,
 };
 
 #endif /* __iwl_fw_api_commands_h__ */
@@ -8,6 +8,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -30,6 +31,7 @@
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
  * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
+ * Copyright (C) 2018 Intel Corporation
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -87,6 +89,11 @@ enum iwl_data_path_subcmd_ids {
 	 */
 	STA_HE_CTXT_CMD = 0x7,
 
+	/**
+	 * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config
+	 */
+	RFH_QUEUE_CONFIG_CMD = 0xD,
+
 	/**
 	 * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
 	 */
...
@@ -377,78 +377,94 @@ enum iwl_rx_he_phy {
 };
 
 /**
- * struct iwl_rx_mpdu_desc - RX MPDU descriptor
+ * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor
  */
-struct iwl_rx_mpdu_desc {
-	/* DW2 */
-	/**
-	 * @mpdu_len: MPDU length
-	 */
-	__le16 mpdu_len;
-	/**
-	 * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1
-	 */
-	u8 mac_flags1;
-	/**
-	 * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2
-	 */
-	u8 mac_flags2;
-	/* DW3 */
-	/**
-	 * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info
-	 */
-	u8 amsdu_info;
-	/**
-	 * @phy_info: &enum iwl_rx_mpdu_phy_info
-	 */
-	__le16 phy_info;
-	/**
-	 * @mac_phy_idx: MAC/PHY index
-	 */
-	u8 mac_phy_idx;
-	/* DW4 - carries csum data only when rpa_en == 1 */
-	/**
-	 * @raw_csum: raw checksum (alledgedly unreliable)
-	 */
-	__le16 raw_csum;
-	/**
-	 * @l3l4_flags: &enum iwl_rx_l3l4_flags
-	 */
-	__le16 l3l4_flags;
-	/* DW5 */
-	/**
-	 * @status: &enum iwl_rx_mpdu_status
-	 */
-	__le16 status;
-	/**
-	 * @hash_filter: hash filter value
-	 */
-	u8 hash_filter;
-	/**
-	 * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
-	 */
-	u8 sta_id_flags;
-	/* DW6 */
-	/**
-	 * @reorder_data: &enum iwl_rx_mpdu_reorder_data
-	 */
-	__le32 reorder_data;
-	/* DW7 - carries rss_hash only when rpa_en == 1 */
-	/**
-	 * @rss_hash: RSS hash value
-	 */
-	__le32 rss_hash;
-	/* DW8 - carries filter_match only when rpa_en == 1 */
-	/**
-	 * @filter_match: filter match value
-	 */
-	__le32 filter_match;
-	/* DW9 */
-	/**
-	 * @rate_n_flags: RX rate/flags encoding
-	 */
-	__le32 rate_n_flags;
-	/* DW10 */
-	/**
-	 * @energy_a: energy chain A
-	 */
+struct iwl_rx_mpdu_desc_v1 {
+	/* DW7 - carries rss_hash only when rpa_en == 1 */
+	/**
+	 * @rss_hash: RSS hash value
+	 */
+	__le32 rss_hash;
+	/* DW8 - carries filter_match only when rpa_en == 1 */
+	/**
+	 * @filter_match: filter match value
+	 */
+	__le32 filter_match;
+	/* DW9 */
+	/**
+	 * @rate_n_flags: RX rate/flags encoding
+	 */
+	__le32 rate_n_flags;
+	/* DW10 */
+	/**
+	 * @energy_a: energy chain A
+	 */
+	u8 energy_a;
+	/**
+	 * @energy_b: energy chain B
+	 */
+	u8 energy_b;
+	/**
+	 * @channel: channel number
+	 */
+	u8 channel;
+	/**
+	 * @mac_context: MAC context mask
+	 */
+	u8 mac_context;
+	/* DW11 */
+	/**
+	 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
+	 */
+	__le32 gp2_on_air_rise;
+	/* DW12 & DW13 */
+	union {
+		/**
+		 * @tsf_on_air_rise:
+		 * TSF value on air rise (INA), only valid if
+		 * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
+		 */
+		__le64 tsf_on_air_rise;
+		/**
+		 * @he_phy_data:
+		 * HE PHY data, see &enum iwl_rx_he_phy, valid
+		 * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
+		 */
+		__le64 he_phy_data;
+	};
+} __packed;
+
+/**
+ * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc_v3 {
+	/* DW7 - carries filter_match only when rpa_en == 1 */
+	/**
+	 * @filter_match: filter match value
+	 */
+	__le32 filter_match;
+	/* DW8 - carries rss_hash only when rpa_en == 1 */
+	/**
+	 * @rss_hash: RSS hash value
+	 */
+	__le32 rss_hash;
+	/* DW9 */
+	/**
+	 * @partial_hash: 31:0 ip/tcp header hash
+	 *	w/o some fields (such as IP SRC addr)
+	 */
+	__le32 partial_hash;
+	/* DW10 */
+	/**
+	 * @raw_xsum: raw xsum value
+	 */
+	__le32 raw_xsum;
+	/* DW11 */
+	/**
+	 * @rate_n_flags: RX rate/flags encoding
+	 */
+	__le32 rate_n_flags;
+	/* DW12 */
+	/**
+	 * @energy_a: energy chain A
+	 */
 	u8 energy_a;
@@ -465,12 +481,12 @@ struct iwl_rx_mpdu_desc {
 	 * @mac_context: MAC context mask
 	 */
 	u8 mac_context;
-	/* DW11 */
+	/* DW13 */
 	/**
 	 * @gp2_on_air_rise: GP2 timer value on air rise (INA)
 	 */
 	__le32 gp2_on_air_rise;
-	/* DW12 & DW13 */
+	/* DW14 & DW15 */
 	union {
 		/**
 		 * @tsf_on_air_rise:
@@ -485,7 +501,78 @@ struct iwl_rx_mpdu_desc {
 		 */
 		__le64 he_phy_data;
 	};
-} __packed;
+	/* DW16 & DW17 */
+	/**
+	 * @reserved: reserved
+	 */
+	__le32 reserved[2];
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+/**
+ * struct iwl_rx_mpdu_desc - RX MPDU descriptor
+ */
+struct iwl_rx_mpdu_desc {
+	/* DW2 */
+	/**
+	 * @mpdu_len: MPDU length
+	 */
+	__le16 mpdu_len;
+	/**
+	 * @mac_flags1: &enum iwl_rx_mpdu_mac_flags1
+	 */
+	u8 mac_flags1;
+	/**
+	 * @mac_flags2: &enum iwl_rx_mpdu_mac_flags2
+	 */
+	u8 mac_flags2;
+	/* DW3 */
+	/**
+	 * @amsdu_info: &enum iwl_rx_mpdu_amsdu_info
+	 */
+	u8 amsdu_info;
+	/**
+	 * @phy_info: &enum iwl_rx_mpdu_phy_info
+	 */
+	__le16 phy_info;
+	/**
+	 * @mac_phy_idx: MAC/PHY index
+	 */
+	u8 mac_phy_idx;
+	/* DW4 - carries csum data only when rpa_en == 1 */
+	/**
+	 * @raw_csum: raw checksum (alledgedly unreliable)
+	 */
+	__le16 raw_csum;
+	/**
+	 * @l3l4_flags: &enum iwl_rx_l3l4_flags
+	 */
+	__le16 l3l4_flags;
+	/* DW5 */
+	/**
+	 * @status: &enum iwl_rx_mpdu_status
+	 */
+	__le16 status;
+	/**
+	 * @hash_filter: hash filter value
+	 */
+	u8 hash_filter;
+	/**
+	 * @sta_id_flags: &enum iwl_rx_mpdu_sta_id_flags
+	 */
+	u8 sta_id_flags;
+	/* DW6 */
+	/**
+	 * @reorder_data: &enum iwl_rx_mpdu_reorder_data
+	 */
+	__le32 reorder_data;
+	union {
+		struct iwl_rx_mpdu_desc_v1 v1;
+		struct iwl_rx_mpdu_desc_v3 v3;
+	};
+} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */
+
+#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1)
 struct iwl_frame_release {
 	u8 baid;
@@ -628,4 +715,36 @@ struct iwl_ba_window_status_notif {
 	__le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX];
 } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */
 
+/**
+ * struct iwl_rfh_queue_data - RX queue configuration data
+ * @q_num: Q num
+ * @enable: enable queue
+ * @reserved: alignment
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @fr_bd_cb: DMA address of free RB table
+ * @ur_bd_cb: DMA address of used RB table
+ * @fr_bd_wid: Initial index of the free table
+ */
+struct iwl_rfh_queue_data {
+	u8 q_num;
+	u8 enable;
+	__le16 reserved;
+	__le64 urbd_stts_wrptr;
+	__le64 fr_bd_cb;
+	__le64 ur_bd_cb;
+	__le32 fr_bd_wid;
+} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */
+
+/**
+ * struct iwl_rfh_queue_config - RX queue configuration
+ * @num_queues: number of queues configured
+ * @reserved: alignment
+ * @data: DMA addresses per-queue
+ */
+struct iwl_rfh_queue_config {
+	u8 num_queues;
+	u8 reserved[3];
+	struct iwl_rfh_queue_data data[];
+} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */
+
 #endif /* __iwl_fw_api_rx_h__ */
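
Note: with the descriptor split above, sizeof(struct iwl_rx_mpdu_desc) now covers the larger v3 layout, so pre-22560 code paths must use IWL_RX_DESC_SIZE_V1 instead. A minimal sketch of the selection logic this enables (the helper name is hypothetical; the real checks are open-coded in ops.c and rx.c further down):

	/* Sketch only - mirrors the device-family checks added later in
	 * this series; not part of the diff itself.
	 */
	static inline size_t iwl_mvm_rx_desc_size(const struct iwl_cfg *cfg)
	{
		/* 22560+ firmware produces the longer v3 descriptor */
		if (cfg->device_family >= IWL_DEVICE_FAMILY_22560)
			return sizeof(struct iwl_rx_mpdu_desc);
		/* older devices: the descriptor ends after the v1 union member */
		return IWL_RX_DESC_SIZE_V1;
	}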
(The following file, fw/common_rx.c, is deleted by this series — see the fw/common_rx.o removal in the Makefile above.)

/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <linuxwifi@intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "iwl-drv.h"
#include "runtime.h"
#include "fw/api/commands.h"
#include "fw/api/alive.h"
static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data;
IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n",
__le32_to_cpu(fseq->aux_read_fseq_ver),
__le32_to_cpu(fseq->wifi_fseq_ver));
}
void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd);
switch (cmd) {
case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF):
iwl_fwrt_fseq_ver_mismatch(fwrt, rxb);
break;
default:
break;
}
}
IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification);
@@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt,
 	if (!iwl_trans_grab_nic_access(fwrt->trans, &flags))
 		return;
 
-	/* Pull RXF1 */
-	iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0);
-	/* Pull RXF2 */
-	iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
-			  RXF_DIFF_FROM_PREV, 1);
-	/* Pull LMAC2 RXF1 */
-	if (fwrt->smem_cfg.num_lmacs > 1)
-		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size,
-				  LMAC2_PRPH_OFFSET, 2);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+		/* Pull RXF1 */
+		iwl_fwrt_dump_rxf(fwrt, dump_data,
+				  cfg->lmac[0].rxfifo1_size, 0, 0);
+		/* Pull RXF2 */
+		iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size,
+				  RXF_DIFF_FROM_PREV, 1);
+		/* Pull LMAC2 RXF1 */
+		if (fwrt->smem_cfg.num_lmacs > 1)
+			iwl_fwrt_dump_rxf(fwrt, dump_data,
+					  cfg->lmac[1].rxfifo1_size,
+					  LMAC2_PRPH_OFFSET, 2);
+	}
 
-	/* Pull TXF data from LMAC1 */
-	for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
-		/* Mark the number of TXF we're pulling now */
-		iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
-		iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i],
-				  0, i);
-	}
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+		/* Pull TXF data from LMAC1 */
+		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
+			/* Mark the number of TXF we're pulling now */
+			iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i);
+			iwl_fwrt_dump_txf(fwrt, dump_data,
+					  cfg->lmac[0].txfifo_size[i], 0, i);
+		}
 
-	/* Pull TXF data from LMAC2 */
-	if (fwrt->smem_cfg.num_lmacs > 1) {
-		for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) {
-			/* Mark the number of TXF we're pulling now */
-			iwl_trans_write_prph(fwrt->trans,
-					     TXF_LARC_NUM + LMAC2_PRPH_OFFSET,
-					     i);
-			iwl_fwrt_dump_txf(fwrt, dump_data,
-					  cfg->lmac[1].txfifo_size[i],
-					  LMAC2_PRPH_OFFSET,
-					  i + cfg->num_txfifo_entries);
+		/* Pull TXF data from LMAC2 */
+		if (fwrt->smem_cfg.num_lmacs > 1) {
+			for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries;
+			     i++) {
+				/* Mark the number of TXF we're pulling now */
+				iwl_trans_write_prph(fwrt->trans,
+						     TXF_LARC_NUM +
						     LMAC2_PRPH_OFFSET, i);
+				iwl_fwrt_dump_txf(fwrt, dump_data,
+						  cfg->lmac[1].txfifo_size[i],
+						  LMAC2_PRPH_OFFSET,
+						  i + cfg->num_txfifo_entries);
+			}
 		}
 	}
 
-	if (fw_has_capa(&fwrt->fw->ucode_capa,
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) &&
+	    fw_has_capa(&fwrt->fw->ucode_capa,
 			IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 		/* Pull UMAC internal TXF data from all TXFs */
 		for (i = 0;
@@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
 		fifo_data_len = 0;
 
-		/* Count RXF2 size */
-		if (mem_cfg->rxfifo2_size) {
-			/* Add header info */
-			fifo_data_len += mem_cfg->rxfifo2_size +
-					 sizeof(*dump_data) +
-					 sizeof(struct iwl_fw_error_dump_fifo);
-		}
-
-		/* Count RXF1 sizes */
-		for (i = 0; i < mem_cfg->num_lmacs; i++) {
-			if (!mem_cfg->lmac[i].rxfifo1_size)
-				continue;
-			/* Add header info */
-			fifo_data_len += mem_cfg->lmac[i].rxfifo1_size +
-					 sizeof(*dump_data) +
-					 sizeof(struct iwl_fw_error_dump_fifo);
-		}
+		if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) {
+			/* Count RXF2 size */
+			if (mem_cfg->rxfifo2_size) {
+				/* Add header info */
+				fifo_data_len +=
+					mem_cfg->rxfifo2_size +
+					sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_fifo);
+			}
+
+			/* Count RXF1 sizes */
+			for (i = 0; i < mem_cfg->num_lmacs; i++) {
+				if (!mem_cfg->lmac[i].rxfifo1_size)
+					continue;
+				/* Add header info */
+				fifo_data_len +=
+					mem_cfg->lmac[i].rxfifo1_size +
+					sizeof(*dump_data) +
+					sizeof(struct iwl_fw_error_dump_fifo);
+			}
+		}
 
-		/* Count TXF sizes */
-		for (i = 0; i < mem_cfg->num_lmacs; i++) {
-			int j;
-
-			for (j = 0; j < mem_cfg->num_txfifo_entries; j++) {
-				if (!mem_cfg->lmac[i].txfifo_size[j])
-					continue;
-				/* Add header info */
-				fifo_data_len +=
-					mem_cfg->lmac[i].txfifo_size[j] +
-					sizeof(*dump_data) +
-					sizeof(struct iwl_fw_error_dump_fifo);
-			}
-		}
+		if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) {
+			size_t fifo_const_len = sizeof(*dump_data) +
+				sizeof(struct iwl_fw_error_dump_fifo);
+
+			/* Count TXF sizes */
+			for (i = 0; i < mem_cfg->num_lmacs; i++) {
+				int j;
+
+				for (j = 0; j < mem_cfg->num_txfifo_entries;
+				     j++) {
+					if (!mem_cfg->lmac[i].txfifo_size[j])
+						continue;
+					/* Add header info */
+					fifo_data_len +=
+						fifo_const_len +
+						mem_cfg->lmac[i].txfifo_size[j];
+				}
+			}
+		}
 
-		if (fw_has_capa(&fwrt->fw->ucode_capa,
+		if ((fwrt->fw->dbg_dump_mask &
+		     BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) &&
+		    fw_has_capa(&fwrt->fw->ucode_capa,
 				IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
 			for (i = 0;
 			     i < ARRAY_SIZE(mem_cfg->internal_txfifo_size);
@@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	}
 
 	/* Make room for PRPH registers */
-	if (!fwrt->trans->cfg->gen2) {
+	if (!fwrt->trans->cfg->gen2 &&
+	    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
 		for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm);
 		     i++) {
 			/* The range includes both boundaries */
@@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	}
 
 	if (!fwrt->trans->cfg->gen2 &&
-	    fwrt->trans->cfg->mq_rx_supported) {
+	    fwrt->trans->cfg->mq_rx_supported &&
+	    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) {
 		for (i = 0; i <
 			ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) {
 			/* The range includes both boundaries */
@@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		}
 	}
 
-	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000)
+	if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 &&
+	    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG))
 		radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ;
 	}
 
 	file_len = sizeof(*dump_file) +
-		   sizeof(*dump_data) * 3 +
-		   sizeof(*dump_smem_cfg) +
 		   fifo_data_len +
 		   prph_len +
-		   radio_len +
-		   sizeof(*dump_info);
+		   radio_len;
+
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO))
+		file_len += sizeof(*dump_data) + sizeof(*dump_info);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG))
+		file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg);
 
-	/* Make room for the SMEM, if it exists */
-	if (smem_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len;
-
-	/* Make room for the secondary SRAM, if it exists */
-	if (sram2_len)
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len;
-
-	/* Make room for MEM segments */
-	for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
-		file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
-			    le32_to_cpu(fw_dbg_mem[i].len);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
+		/* Make room for the SMEM, if it exists */
+		if (smem_len)
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				    smem_len;
+
+		/* Make room for the secondary SRAM, if it exists */
+		if (sram2_len)
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				    sram2_len;
+
+		/* Make room for MEM segments */
+		for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) {
+			file_len += sizeof(*dump_data) + sizeof(*dump_mem) +
+				    le32_to_cpu(fw_dbg_mem[i].len);
+		}
 	}
 
 	/* Make room for fw's virtual image pages, if it exists */
-	if (!fwrt->trans->cfg->gen2 &&
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+	    !fwrt->trans->cfg->gen2 &&
 	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
 	    fwrt->fw_paging_db[0].fw_paging_block)
 		file_len += fwrt->num_of_paging_blk *
@@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 			   sizeof(*dump_info) + sizeof(*dump_smem_cfg);
 	}
 
-	if (fwrt->dump.desc)
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+	    fwrt->dump.desc)
 		file_len += sizeof(*dump_data) + sizeof(*dump_trig) +
 			    fwrt->dump.desc->len;
 
-	if (!fwrt->fw->n_dbg_mem_tlv)
-		file_len += sram_len + sizeof(*dump_mem);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) &&
+	    !fwrt->fw->n_dbg_mem_tlv)
+		file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem);
 
 	dump_file = vzalloc(file_len);
 	if (!dump_file) {
@@ -740,48 +772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
 	dump_data = (void *)dump_file->data;
 
-	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
-	dump_data->len = cpu_to_le32(sizeof(*dump_info));
-	dump_info = (void *)dump_data->data;
-	dump_info->device_family =
-		fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ?
-			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
-			cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
-	dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
-	memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
-	       sizeof(dump_info->fw_human_readable));
-	strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
-		sizeof(dump_info->dev_human_readable));
-	strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
-		sizeof(dump_info->bus_human_readable));
-
-	dump_data = iwl_fw_error_next_data(dump_data);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) {
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO);
+		dump_data->len = cpu_to_le32(sizeof(*dump_info));
+		dump_info = (void *)dump_data->data;
+		dump_info->device_family =
+			fwrt->trans->cfg->device_family ==
+			IWL_DEVICE_FAMILY_7000 ?
+				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) :
+				cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8);
+		dump_info->hw_step =
+			cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev));
+		memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable,
+		       sizeof(dump_info->fw_human_readable));
+		strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name,
+			sizeof(dump_info->dev_human_readable) - 1);
+		strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name,
+			sizeof(dump_info->bus_human_readable) - 1);
+
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
 
-	/* Dump shared memory configuration */
-	dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
-	dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
-	dump_smem_cfg = (void *)dump_data->data;
-	dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
-	dump_smem_cfg->num_txfifo_entries =
-		cpu_to_le32(mem_cfg->num_txfifo_entries);
-	for (i = 0; i < MAX_NUM_LMAC; i++) {
-		int j;
-
-		for (j = 0; j < TX_FIFO_MAX_NUM; j++)
-			dump_smem_cfg->lmac[i].txfifo_size[j] =
-				cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]);
-		dump_smem_cfg->lmac[i].rxfifo1_size =
-			cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
-	}
-	dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size);
-	dump_smem_cfg->internal_txfifo_addr =
-		cpu_to_le32(mem_cfg->internal_txfifo_addr);
-	for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
-		dump_smem_cfg->internal_txfifo_size[i] =
-			cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
-	}
-
-	dump_data = iwl_fw_error_next_data(dump_data);
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) {
+		/* Dump shared memory configuration */
+		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG);
+		dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg));
+		dump_smem_cfg = (void *)dump_data->data;
+		dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs);
+		dump_smem_cfg->num_txfifo_entries =
+			cpu_to_le32(mem_cfg->num_txfifo_entries);
+		for (i = 0; i < MAX_NUM_LMAC; i++) {
+			int j;
+			u32 *txf_size = mem_cfg->lmac[i].txfifo_size;
+
+			for (j = 0; j < TX_FIFO_MAX_NUM; j++)
+				dump_smem_cfg->lmac[i].txfifo_size[j] =
+					cpu_to_le32(txf_size[j]);
+			dump_smem_cfg->lmac[i].rxfifo1_size =
+				cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size);
+		}
+		dump_smem_cfg->rxfifo2_size =
+			cpu_to_le32(mem_cfg->rxfifo2_size);
+		dump_smem_cfg->internal_txfifo_addr =
+			cpu_to_le32(mem_cfg->internal_txfifo_addr);
+		for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) {
+			dump_smem_cfg->internal_txfifo_size[i] =
+				cpu_to_le32(mem_cfg->internal_txfifo_size[i]);
+		}
+
+		dump_data = iwl_fw_error_next_data(dump_data);
+	}
 
 	/* We only dump the FIFOs if the FW is in error state */
 	if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) {
@@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		iwl_read_radio_regs(fwrt, &dump_data);
 	}
 
-	if (fwrt->dump.desc) {
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) &&
+	    fwrt->dump.desc) {
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO);
 		dump_data->len = cpu_to_le32(sizeof(*dump_trig) +
 					     fwrt->dump.desc->len);
@@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	if (monitor_dump_only)
 		goto dump_trans_data;
 
-	if (!fwrt->fw->n_dbg_mem_tlv) {
+	if (!fwrt->fw->n_dbg_mem_tlv &&
+	    fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem));
 		dump_mem = (void *)dump_data->data;
@@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs);
 		bool success;
 
+		if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)))
+			break;
+
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(len + sizeof(*dump_mem));
 		dump_mem = (void *)dump_data->data;
@@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
-	if (smem_len) {
+	if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
 		IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n");
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem));
@@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 		dump_data = iwl_fw_error_next_data(dump_data);
 	}
 
-	if (sram2_len) {
+	if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) {
 		IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n");
 		dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM);
 		dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem));
@@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt)
 	}
 
 	/* Dump fw's virtual image */
-	if (!fwrt->trans->cfg->gen2 &&
+	if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) &&
+	    !fwrt->trans->cfg->gen2 &&
 	    fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size &&
 	    fwrt->fw_paging_db[0].fw_paging_block) {
 		IWL_DEBUG_INFO(fwrt, "WRT paging dump\n");
...
@@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type {
 	IWL_UCODE_TLV_FW_GSCAN_CAPA	= 50,
 	IWL_UCODE_TLV_FW_MEM_SEG	= 51,
 	IWL_UCODE_TLV_IML		= 52,
+
+	/* TLVs 0x1000-0x2000 are for internal driver usage */
+	IWL_UCODE_TLV_FW_DBG_DUMP_LST	= 0x1000,
 };
 struct iwl_ucode_tlv {
@@ -318,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t;
  *	IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR
  *	is supported.
  * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC
- * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan
+ * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used)
  * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification
  * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm
  * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related
@@ -889,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv {
 	struct iwl_fw_dbg_conf_hcmd hcmd;
 } __packed;
 
-/**
- * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- *	change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- *	hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- *	specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_fw_gscan_capabilities {
-	__le32 max_scan_cache_size;
-	__le32 max_scan_buckets;
-	__le32 max_ap_cache_per_scan;
-	__le32 max_rssi_sample_size;
-	__le32 max_scan_reporting_threshold;
-	__le32 max_hotlist_aps;
-	__le32 max_significant_change_aps;
-	__le32 max_bssid_history_entries;
-	__le32 max_hotlist_ssids;
-	__le32 max_number_epno_networks;
-	__le32 max_number_epno_networks_by_ssid;
-	__le32 max_number_of_white_listed_ssid;
-	__le32 max_number_of_black_listed_ssid;
-} __packed;
-
 #endif /* __iwl_fw_file_h__ */
@@ -192,41 +192,6 @@ struct iwl_fw_cscheme_list {
 	struct iwl_fw_cipher_scheme cs[];
 } __packed;
 
-/**
- * struct iwl_gscan_capabilities - gscan capabilities supported by FW
- * @max_scan_cache_size: total space allocated for scan results (in bytes).
- * @max_scan_buckets: maximum number of channel buckets.
- * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan.
- * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI.
- * @max_scan_reporting_threshold: max possible report threshold. in percentage.
- * @max_hotlist_aps: maximum number of entries for hotlist APs.
- * @max_significant_change_aps: maximum number of entries for significant
- *	change APs.
- * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can
- *	hold.
- * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs.
- * @max_number_epno_networks: max number of epno entries.
- * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is
- *	specified.
- * @max_number_of_white_listed_ssid: max number of white listed SSIDs.
- * @max_number_of_black_listed_ssid: max number of black listed SSIDs.
- */
-struct iwl_gscan_capabilities {
-	u32 max_scan_cache_size;
-	u32 max_scan_buckets;
-	u32 max_ap_cache_per_scan;
-	u32 max_rssi_sample_size;
-	u32 max_scan_reporting_threshold;
-	u32 max_hotlist_aps;
-	u32 max_significant_change_aps;
-	u32 max_bssid_history_entries;
-	u32 max_hotlist_ssids;
-	u32 max_number_epno_networks;
-	u32 max_number_epno_networks_by_ssid;
-	u32 max_number_of_white_listed_ssid;
-	u32 max_number_of_black_listed_ssid;
-};
-
 /**
  * enum iwl_fw_type - iwlwifi firmware type
  * @IWL_FW_DVM: DVM firmware
@@ -298,7 +263,7 @@ struct iwl_fw {
 	size_t n_dbg_mem_tlv;
 	size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX];
 	u8 dbg_dest_reg_num;
-	struct iwl_gscan_capabilities gscan_capa;
+	u32 dbg_dump_mask;
 };
 
 static inline const char *get_fw_dbg_mode_string(int mode)
...
@@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt);
 
 void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt);
 
-void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt,
-				  struct iwl_rx_cmd_buffer *rxb);
-
 #endif /* __iwl_fw_runtime_h__ */
@@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len)
 	return 0;
 }
 
-static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data,
-				 const u32 len)
-{
-	struct iwl_fw_gscan_capabilities *fw_capa = (void *)data;
-	struct iwl_gscan_capabilities *capa = &fw->gscan_capa;
-
-	capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size);
-	capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets);
-	capa->max_ap_cache_per_scan =
-		le32_to_cpu(fw_capa->max_ap_cache_per_scan);
-	capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size);
-	capa->max_scan_reporting_threshold =
-		le32_to_cpu(fw_capa->max_scan_reporting_threshold);
-	capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps);
-	capa->max_significant_change_aps =
-		le32_to_cpu(fw_capa->max_significant_change_aps);
-	capa->max_bssid_history_entries =
-		le32_to_cpu(fw_capa->max_bssid_history_entries);
-	capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids);
-	capa->max_number_epno_networks =
-		le32_to_cpu(fw_capa->max_number_epno_networks);
-	capa->max_number_epno_networks_by_ssid =
-		le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid);
-	capa->max_number_of_white_listed_ssid =
-		le32_to_cpu(fw_capa->max_number_of_white_listed_ssid);
-	capa->max_number_of_black_listed_ssid =
-		le32_to_cpu(fw_capa->max_number_of_black_listed_ssid);
-}
-
 /*
  * Gets uCode section from tlv.
  */
@@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 	u32 build, paging_mem_size;
 	int num_of_cpus;
 	bool usniffer_req = false;
-	bool gscan_capa = false;
 
 	if (len < sizeof(*ucode)) {
 		IWL_ERR(drv, "uCode has invalid length: %zd\n", len);
@@ -1043,6 +1013,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 			pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len;
 			break;
 			}
+		case IWL_UCODE_TLV_FW_DBG_DUMP_LST: {
+			if (tlv_len != sizeof(u32)) {
+				IWL_ERR(drv,
+					"dbg lst mask size incorrect, skip\n");
+				break;
+			}
+
+			drv->fw.dbg_dump_mask =
+				le32_to_cpup((__le32 *)tlv_data);
+			break;
+			}
 		case IWL_UCODE_TLV_SEC_RT_USNIFFER:
 			*usniffer_images = true;
 			iwl_store_ucode_sec(pieces, tlv_data,
@@ -1079,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 				paging_mem_size;
 			break;
 		case IWL_UCODE_TLV_FW_GSCAN_CAPA:
-			/*
-			 * Don't return an error in case of a shorter tlv_len
-			 * to enable loading of FW that has an old format
-			 * of GSCAN capabilities TLV.
-			 */
-			if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities))
-				break;
-			iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len);
-			gscan_capa = true;
+			/* ignored */
 			break;
 		case IWL_UCODE_TLV_FW_MEM_SEG: {
 			struct iwl_fw_dbg_mem_seg_tlv *dbg_mem =
@@ -1153,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 		return -EINVAL;
 	}
 
-	/*
-	 * If ucode advertises that it supports GSCAN but GSCAN
-	 * capabilities TLV is not present, or if it has an old format,
-	 * warn and continue without GSCAN.
-	 */
-	if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) &&
-	    !gscan_capa) {
-		IWL_DEBUG_INFO(drv,
-			       "GSCAN is supported but capabilities TLV is unavailable\n");
-		__clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT,
-			    capa->_capa);
-	}
-
 	return 0;
 
  invalid_tlv_len:
@@ -1316,6 +1275,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context)
 	fw->ucode_capa.standard_phy_calibration_size =
 			IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE;
 	fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS;
+	/* dump all fw memory areas by default */
+	fw->dbg_dump_mask = 0xffffffff;
 
 	pieces = kzalloc(sizeof(*pieces), GFP_KERNEL);
 	if (!pieces)
...
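
Note: the new IWL_UCODE_TLV_FW_DBG_DUMP_LST TLV carries a single __le32 bitmask; each bit position corresponds to one enum iwl_fw_error_dump_type value and enables the matching dump section, with 0xffffffff (dump everything) as the default when the firmware image carries no such TLV. A hedged sketch of the check the dump code performs (the helper name is hypothetical; fw/dbg.c above open-codes it):

	/* Sketch only - illustrates the gating, not part of the diff */
	static inline bool iwl_fw_dump_type_enabled(const struct iwl_fw *fw,
						    enum iwl_fw_error_dump_type type)
	{
		return fw->dbg_dump_mask & BIT(type);
	}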
@@ -439,6 +439,20 @@ struct iwl_trans_txq_scd_cfg {
 	int frame_limit;
 };
 
+/**
+ * struct iwl_trans_rxq_dma_data - RX queue DMA data
+ * @fr_bd_cb: DMA address of free BD cyclic buffer
+ * @fr_bd_wid: Initial write index of the free BD cyclic buffer
+ * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr
+ * @ur_bd_cb: DMA address of used BD cyclic buffer
+ */
+struct iwl_trans_rxq_dma_data {
+	u64 fr_bd_cb;
+	u32 fr_bd_wid;
+	u64 urbd_stts_wrptr;
+	u64 ur_bd_cb;
+};
+
 /**
  * struct iwl_trans_ops - transport specific operations
  *
@@ -559,6 +573,8 @@ struct iwl_trans_ops {
 			 int cmd_id, int size,
 			 unsigned int queue_wdg_timeout);
 	void (*txq_free)(struct iwl_trans *trans, int queue);
+	int (*rxq_dma_data)(struct iwl_trans *trans, int queue,
+			    struct iwl_trans_rxq_dma_data *data);
 
 	void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
 				    bool shared);
@@ -755,6 +771,7 @@ struct iwl_trans {
 	const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv;
 	const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX];
 	struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv;
+	u32 dbg_dump_mask;
 	u8 dbg_dest_reg_num;
 
 	enum iwl_plat_pm_mode system_pm_mode;
@@ -947,6 +964,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn,
 				     cfg, queue_wdg_timeout);
 }
 
+static inline int
+iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue,
+			   struct iwl_trans_rxq_dma_data *data)
+{
+	if (WARN_ON_ONCE(!trans->ops->rxq_dma_data))
+		return -ENOTSUPP;
+
+	return trans->ops->rxq_dma_data(trans, queue, data);
+}
+
 static inline void
 iwl_trans_txq_free(struct iwl_trans *trans, int queue)
 {
...
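
Note: a minimal usage sketch of the new transport op (the error check is illustrative; the caller added in fw.c below currently ignores the return value):

	/* Sketch only - query the DMA addresses of RX queue 1;
	 * queue 0 is configured via context info instead.
	 */
	struct iwl_trans_rxq_dma_data data;
	int ret;

	ret = iwl_trans_get_rxq_dma_data(trans, 1, &data);
	if (ret)
		return ret;	/* -ENOTSUPP if the transport lacks the op */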
@@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
 			cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR);
 #endif
 
+	/*
+	 * TODO: this is needed because the firmware is not stopping
+	 * the recording automatically before entering D3. This can
+	 * be removed once the FW starts doing that.
+	 */
+	iwl_fw_dbg_stop_recording(&mvm->fwrt);
+
 	/* must be last -- this switches firmware state */
 	ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd);
 	if (ret)
...
@@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
 	struct iwl_rx_mpdu_desc *desc;
 	int bin_len = count / 2;
 	int ret = -EINVAL;
+	size_t mpdu_cmd_hdr_size =
+		(mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		sizeof(struct iwl_rx_mpdu_desc) :
+		IWL_RX_DESC_SIZE_V1;
 
 	if (!iwl_mvm_firmware_running(mvm))
 		return -EIO;
@@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
 		goto out;
 
 	/* avoid invalid memory access */
-	if (bin_len < sizeof(*pkt) + sizeof(*desc))
+	if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size)
 		goto out;
 
 	/* check this is RX packet */
@@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm,
 	/* check the length in metadata matches actual received length */
 	desc = (void *)pkt->data;
 	if (le16_to_cpu(desc->mpdu_len) !=
-	    (bin_len - sizeof(*desc) - sizeof(*pkt)))
+	    (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt)))
 		goto out;
 
 	local_bh_disable();
...
@@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm)
 	return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd);
 }
 
+static int iwl_configure_rxq(struct iwl_mvm *mvm)
+{
+	int i, num_queues, size;
+	struct iwl_rfh_queue_config *cmd;
+
+	/* Do not configure default queue, it is configured via context info */
+	num_queues = mvm->trans->num_rx_queues - 1;
+
+	size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data);
+
+	cmd = kzalloc(size, GFP_KERNEL);
+	if (!cmd)
+		return -ENOMEM;
+
+	cmd->num_queues = num_queues;
+
+	for (i = 0; i < num_queues; i++) {
+		struct iwl_trans_rxq_dma_data data;
+
+		cmd->data[i].q_num = i + 1;
+		iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data);
+
+		cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb);
+		cmd->data[i].urbd_stts_wrptr =
+			cpu_to_le64(data.urbd_stts_wrptr);
+		cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb);
+		cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid);
+	}
+
+	return iwl_mvm_send_cmd_pdu(mvm,
+				    WIDE_ID(DATA_PATH_GROUP,
+					    RFH_QUEUE_CONFIG_CMD),
+				    0, size, cmd);
+}
+
 static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm)
 {
 	struct iwl_dqa_enable_cmd dqa_cmd = {
@@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
 		goto error;
 
 	/* Init RSS configuration */
-	/* TODO - remove 22000 disablement when we have RXQ config API */
-	if (iwl_mvm_has_new_rx_api(mvm) &&
-	    mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22000) {
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
+		ret = iwl_configure_rxq(mvm);
+		if (ret) {
+			IWL_ERR(mvm, "Failed to configure RX queues: %d\n",
+				ret);
+			goto error;
+		}
+	}
+
+	if (iwl_mvm_has_new_rx_api(mvm)) {
 		ret = iwl_send_rss_cfg_cmd(mvm);
 		if (ret) {
 			IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
...
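
Note: RFH_QUEUE_CONFIG_CMD hands the firmware the DMA addresses of every non-default RX queue, which is what lets this series drop the earlier 22000-family disablement of RSS and of the RX queue sync notification (see mac80211.c below). Because struct iwl_rfh_queue_config ends in a flexible array, the command length scales with the queue count, as in iwl_configure_rxq() above:

	/* Sketch of the sizing arithmetic used above */
	num_queues = mvm->trans->num_rx_queues - 1;	/* default queue excluded */
	size = sizeof(struct iwl_rfh_queue_config) +
	       num_queues * sizeof(struct iwl_rfh_queue_data);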
@@ -4558,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
 		atomic_set(&mvm->queue_sync_counter,
 			   mvm->trans->num_rx_queues);
 
-	/* TODO - remove this when we have RXQ config API */
-	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
-		qmask = BIT(0);
-		if (notif->sync)
-			atomic_set(&mvm->queue_sync_counter, 1);
-	}
-
 	ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size);
 	if (ret) {
 		IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret);
...
@@ -449,6 +449,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
 	HCMD_NAME(UPDATE_MU_GROUPS_CMD),
 	HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
 	HCMD_NAME(STA_HE_CTXT_CMD),
+	HCMD_NAME(RFH_QUEUE_CONFIG_CMD),
 	HCMD_NAME(STA_PM_NOTIF),
 	HCMD_NAME(MU_GROUP_MGMT_NOTIF),
 	HCMD_NAME(RX_QUEUES_NOTIFICATION),
@@ -621,7 +622,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 
 	if (iwl_mvm_has_new_rx_api(mvm)) {
 		op_mode->ops = &iwl_mvm_ops_mq;
-		trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc);
+		trans->rx_mpdu_cmd_hdr_size =
+			(trans->cfg->device_family >=
+			 IWL_DEVICE_FAMILY_22560) ?
+			sizeof(struct iwl_rx_mpdu_desc) :
+			IWL_RX_DESC_SIZE_V1;
 	} else {
 		op_mode->ops = &iwl_mvm_ops;
 		trans->rx_mpdu_cmd_hdr_size =
@@ -704,11 +709,13 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	}
 
 	/* the hardware splits the A-MSDU */
-	if (mvm->trans->cfg->device_family >=
-	    IWL_DEVICE_FAMILY_22560)
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
 		trans_cfg.rx_buf_size = IWL_AMSDU_2K;
-	else if (mvm->cfg->mq_rx_supported)
+		/* TODO: remove when balanced power mode is fw supported */
+		iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM;
+	} else if (mvm->cfg->mq_rx_supported) {
 		trans_cfg.rx_buf_size = IWL_AMSDU_4K;
+	}
 
 	trans->wide_cmd_header = true;
 	trans_cfg.bc_table_dword =
@@ -743,6 +750,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
 	memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv,
 	       sizeof(trans->dbg_conf_tlv));
 	trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv;
+	trans->dbg_dump_mask = mvm->fw->dbg_dump_mask;
 
 	trans->iml = mvm->fw->iml;
 	trans->iml_len = mvm->fw->iml_len;
@@ -1008,10 +1016,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
 		list_add_tail(&entry->list, &mvm->async_handlers_list);
 		spin_unlock(&mvm->async_handlers_lock);
 		schedule_work(&mvm->async_handlers_wk);
-		return;
+		break;
 	}
-
-	iwl_fwrt_handle_notification(&mvm->fwrt, rxb);
 }
 
 static void iwl_mvm_rx(struct iwl_op_mode *op_mode,
...
@@ -215,15 +215,14 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
 }
 
 static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
-					struct iwl_rx_mpdu_desc *desc,
-					struct ieee80211_rx_status *rx_status)
+					struct ieee80211_rx_status *rx_status,
+					u32 rate_n_flags, int energy_a,
+					int energy_b)
 {
-	int energy_a, energy_b, max_energy;
-	u32 rate_flags = le32_to_cpu(desc->rate_n_flags);
+	int max_energy;
+	u32 rate_flags = rate_n_flags;
 
-	energy_a = desc->energy_a;
 	energy_a = energy_a ? -energy_a : S8_MIN;
-	energy_b = desc->energy_b;
 	energy_b = energy_b ? -energy_b : S8_MIN;
 	max_energy = max(energy_a, energy_b);
@@ -368,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue,
 		tid = IWL_MAX_TID_COUNT;
 
 	/* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */
-	sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
+	sub_frame_idx = desc->amsdu_info &
+		IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK;
 
 	if (unlikely(ieee80211_has_retry(hdr->frame_control) &&
 		     dup_data->last_seq[tid] == hdr->seq_ctrl &&
@@ -862,23 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 	struct ieee80211_rx_status *rx_status;
 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
 	struct iwl_rx_mpdu_desc *desc = (void *)pkt->data;
-	struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc));
+	struct ieee80211_hdr *hdr;
 	u32 len = le16_to_cpu(desc->mpdu_len);
-	u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags);
+	u32 rate_n_flags, gp2_on_air_rise;
 	u16 phy_info = le16_to_cpu(desc->phy_info);
 	struct ieee80211_sta *sta = NULL;
 	struct sk_buff *skb;
-	u8 crypt_len = 0;
+	u8 crypt_len = 0, channel, energy_a, energy_b;
 	struct ieee80211_radiotap_he *he = NULL;
 	struct ieee80211_radiotap_he_mu *he_mu = NULL;
 	u32 he_type = 0xffffffff;
 	/* this is invalid e.g. because puncture type doesn't allow 0b11 */
 #define HE_PHY_DATA_INVAL ((u64)-1)
 	u64 he_phy_data = HE_PHY_DATA_INVAL;
+	size_t desc_size;
 
 	if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
 		return;
 
+	if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags);
+		channel = desc->v3.channel;
+		gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise);
+		energy_a = desc->v3.energy_a;
+		energy_b = desc->v3.energy_b;
+		desc_size = sizeof(*desc);
+	} else {
+		rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags);
+		channel = desc->v1.channel;
+		gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise);
+		energy_a = desc->v1.energy_a;
+		energy_b = desc->v1.energy_b;
+		desc_size = IWL_RX_DESC_SIZE_V1;
+	}
+
+	hdr = (void *)(pkt->data + desc_size);
 	/* Dont use dev_alloc_skb(), we'll have enough headroom once
 	 * ieee80211_hdr pulled.
 	 */
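The hunk above replaces the single fixed-layout RX descriptor with a version split: 22560 parts report PHY data in a v3 layout, older ones keep v1, so the MPDU header no longer sits at a compile-time offset. A standalone C sketch of the pattern, with invented field layouts and sizes (the real ones are in iwl_rx_mpdu_desc):

    #include <stdint.h>
    #include <stdio.h>

    /* Invented layouts; only the idea (two sizes, same logical fields) matters. */
    struct rx_desc_v1 { uint32_t rate_n_flags; uint8_t channel, energy_a, energy_b; };
    struct rx_desc_v3 { uint64_t extra; uint32_t rate_n_flags; uint8_t channel, energy_a, energy_b; };

    int main(void)
    {
        uint8_t pkt[128] = {0};     /* descriptor followed by the 802.11 header */
        int is_22560 = 1;           /* stands in for the device_family check */
        size_t desc_size = is_22560 ? sizeof(struct rx_desc_v3)
                                    : sizeof(struct rx_desc_v1);
        void *hdr = pkt + desc_size; /* header offset now depends on the version */

        printf("header starts at offset %zu\n", desc_size);
        (void)hdr;
        return 0;
    }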
@@ -925,8 +943,11 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
 
 		if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
-			he_phy_data =
-				le64_to_cpu(desc->he_phy_data);
+			if (mvm->trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_22560)
+				he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+			else
+				he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
 
 			if (he_type == RATE_MCS_HE_TYPE_MU) {
 				he_mu = skb_put_data(skb, &mu_known,
@@ -940,6 +961,8 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		__skb_pull(skb, radiotap_len);
 	}
 
+	rx_status = IEEE80211_SKB_RXCB(skb);
+
 	if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
 			      le32_to_cpu(pkt->len_n_flags), queue,
 			      &crypt_len)) {
@@ -962,14 +985,28 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 		rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE;
 
 	if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) {
-		rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
+		u64 tsf_on_air_rise;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise);
+		else
+			tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise);
+
+		rx_status->mactime = tsf_on_air_rise;
+
 		/* TSF as indicated by the firmware is at INA time */
 		rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
 	} else if (he_type == RATE_MCS_HE_TYPE_SU) {
+		u64 he_phy_data;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+		else
+			he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
+
 		he->data1 |=
 			cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
 		if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
-			      le64_to_cpu(desc->he_phy_data)))
+			      he_phy_data))
 			he->data3 |=
 				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
@@ -980,7 +1017,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 			rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
 			rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
 			if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
-				      le64_to_cpu(desc->he_phy_data)))
+				      he_phy_data))
 				rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
 		}
 	} else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
@@ -1005,16 +1042,23 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 					       he_phy_data),
 				     IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
 	}
-	rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
-	rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
+	rx_status->device_timestamp = gp2_on_air_rise;
+	rx_status->band = channel > 14 ? NL80211_BAND_5GHZ :
 		NL80211_BAND_2GHZ;
-	rx_status->freq = ieee80211_channel_to_frequency(desc->channel,
+	rx_status->freq = ieee80211_channel_to_frequency(channel,
 							 rx_status->band);
-	iwl_mvm_get_signal_strength(mvm, desc, rx_status);
+	iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a,
+				    energy_b);
 
 	/* update aggregation data for monitor sake on default queue */
 	if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
 		bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE;
+		u64 he_phy_data;
+
+		if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+		else
+			he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
 
 		rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
 		rx_status->ampdu_reference = mvm->ampdu_ref;
@@ -1027,7 +1071,7 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 			    he_type == RATE_MCS_HE_TYPE_MU) {
 				rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
 				if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
-					      le64_to_cpu(desc->he_phy_data)))
+					      he_phy_data))
 					rx_status->flag |=
 						RX_FLAG_AMPDU_EOF_BIT;
 			}
@@ -1327,12 +1371,19 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
 			break;
 		case RATE_MCS_HE_TYPE_MU: {
 			u16 val;
+			u64 he_phy_data;
+
+			if (mvm->trans->cfg->device_family >=
+			    IWL_DEVICE_FAMILY_22560)
+				he_phy_data = le64_to_cpu(desc->v3.he_phy_data);
+			else
+				he_phy_data = le64_to_cpu(desc->v1.he_phy_data);
 
 			if (he_phy_data == HE_PHY_DATA_INVAL)
 				break;
 
 			val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
-					le64_to_cpu(desc->he_phy_data));
+					he_phy_data);
 			he->data2 |=
 				cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
...
@@ -806,17 +806,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
 	{IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)},
 
 /* 22000 Series */
-	{IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
 	{IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)},
-	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)},
 	{IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)},
 	{IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)},
 	{IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)},
 	{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
 	{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
 	{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)},
 	{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)},
+	{IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)},
 
 #endif /* CONFIG_IWLMVM */
@@ -979,6 +994,10 @@ static int iwl_pci_resume(struct device *device)
 	if (!trans->op_mode)
 		return 0;
 
+	/* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */
+	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
+		return 0;
+
 	/* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */
 	iwl_pcie_conf_msix_hw(trans_pcie);
...
@@ -45,6 +45,7 @@
 #include "iwl-debug.h"
 #include "iwl-io.h"
 #include "iwl-op-mode.h"
+#include "iwl-drv.h"
 
 /* We need 2 entries for the TX command and header, and another one might
  * be needed for potential data in the SKB's head. The remaining ones can
@@ -72,6 +73,7 @@ struct iwl_host_cmd;
  * @page: driver's pointer to the rxb page
  * @invalid: rxb is in driver ownership - not owned by HW
  * @vid: index of this rxb in the global table
+ * @size: size used from the buffer
  */
 struct iwl_rx_mem_buffer {
 	dma_addr_t page_dma;
@@ -79,6 +81,7 @@ struct iwl_rx_mem_buffer {
 	u16 vid;
 	bool invalid;
 	struct list_head list;
+	u32 size;
 };
 
 /**
@@ -159,8 +162,10 @@ enum iwl_completion_desc_wifi_status {
 	IWL_CD_STTS_REPLAY_ERR,
 };
 
-#define IWL_RX_TD_TYPE		0xff000000
-#define IWL_RX_TD_SIZE		0x00ffffff
+#define IWL_RX_TD_TYPE_MSK	0xff000000
+#define IWL_RX_TD_SIZE_MSK	0x00ffffff
+#define IWL_RX_TD_SIZE_2K	BIT(11)
+#define IWL_RX_TD_TYPE		0
 
 /**
  * struct iwl_rx_transfer_desc - transfer descriptor
@@ -204,6 +209,7 @@ struct iwl_rx_completion_desc {
  * @id: queue index
 * @bd: driver's pointer to buffer of receive buffer descriptors (rbd).
 *	Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices.
+*	In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's
 * @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
 * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd)
 * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd)
@@ -230,7 +236,11 @@ struct iwl_rxq {
 	int id;
 	void *bd;
 	dma_addr_t bd_dma;
-	__le32 *used_bd;
+	union {
+		void *used_bd;
+		__le32 *bd_32;
+		struct iwl_rx_completion_desc *cd;
+	};
 	dma_addr_t used_bd_dma;
 	__le16 *tr_tail;
 	dma_addr_t tr_tail_dma;
@@ -245,7 +255,7 @@ struct iwl_rxq {
 	struct list_head rx_free;
 	struct list_head rx_used;
 	bool need_update;
-	struct iwl_rb_status *rb_stts;
+	void *rb_stts;
 	dma_addr_t rb_stts_dma;
 	spinlock_t lock;
 	struct napi_struct napi;
@@ -289,6 +299,24 @@ static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
 	return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
 }
 
+/**
+ * iwl_get_closed_rb_stts - get closed rb stts from different structs
+ * @rxq - the rxq to get the rb stts from
+ */
+static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans,
+					    struct iwl_rxq *rxq)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		__le16 *rb_stts = rxq->rb_stts;
+
+		return READ_ONCE(*rb_stts);
+	} else {
+		struct iwl_rb_status *rb_stts = rxq->rb_stts;
+
+		return READ_ONCE(rb_stts->closed_rb_num);
+	}
+}
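Callers keep masking the helper's result down to the 12 bits that carry the closed RB count, exactly as the rx handler later in this patch does; the helper only hides whether rb_stts is a bare little-endian counter (22560) or the legacy status struct. Usage, as it appears in iwl_pcie_rx_handle():

	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;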
 /**
  * iwl_queue_dec_wrap - decrement queue index, wrap back to end
  * @index -- current index
@@ -612,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans)
 	return (void *)trans->trans_specific;
 }
 
+static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
+				      struct msix_entry *entry)
+{
+	/*
+	 * Before sending the interrupt the HW disables it to prevent
+	 * a nested interrupt. This is done by writing 1 to the corresponding
+	 * bit in the mask register. After handling the interrupt, it should be
+	 * re-enabled by clearing this bit. This register is defined as
+	 * write 1 clear (W1C) register, meaning that it's being clear
+	 * by writing 1 to the bit.
+	 */
+	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
+}
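The W1C behaviour described in the comment can be simulated in plain C; the register and names below are made up, only the clear-by-writing-1 semantics match:

    #include <stdint.h>
    #include <assert.h>

    static uint32_t automask;                   /* HW sets a bit to mask a vector */

    static void hw_raise_irq(unsigned n) { automask |= 1u << n; }
    static void w1c_write(uint32_t val)  { automask &= ~val; } /* writing 1 clears */

    int main(void)
    {
        hw_raise_irq(3);                        /* HW auto-masks vector 3 */
        assert(automask & (1u << 3));
        w1c_write(1u << 3);                     /* driver re-enables it */
        assert(!(automask & (1u << 3)));
        return 0;
    }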
 static inline struct iwl_trans *
 iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie)
 {
@@ -639,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id);
 irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id);
 int iwl_pcie_rx_stop(struct iwl_trans *trans);
 void iwl_pcie_rx_free(struct iwl_trans *trans);
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans);
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq);
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget);
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+			    struct iwl_rxq *rxq);
 
 /*****************************************************
 * ICT - interrupt handling
@@ -865,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans,
 	return txq->tfds + trans_pcie->tfd_size * idx;
 }
 
+static inline const char *queue_name(struct device *dev,
+				     struct iwl_trans_pcie *trans_p, int i)
+{
+	if (trans_p->shared_vec_mask) {
+		int vec = trans_p->shared_vec_mask &
+			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
+
+		if (i == 0)
+			return DRV_NAME ": shared IRQ";
+
+		return devm_kasprintf(dev, GFP_KERNEL,
+				      DRV_NAME ": queue %d", i + vec);
+	}
+	if (i == 0)
+		return DRV_NAME ": default queue";
+	if (i == trans_p->alloc_vecs - 1)
+		return DRV_NAME ": exception";
+
+	return devm_kasprintf(dev, GFP_KERNEL,
+			      DRV_NAME ": queue %d", i);
+}
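For context, trans.c consumes these names when wiring up the MSI-X vectors, roughly as sketched below; qname, handler and msix_entry stand in for locals of iwl_pcie_init_msix_handler(), so treat the argument details as illustrative:

	const char *qname = queue_name(&pdev->dev, trans_pcie, i);

	if (!qname)
		return -ENOMEM;
	ret = devm_request_threaded_irq(&pdev->dev, msix_entry->vector,
					iwl_pcie_msix_isr, handler,
					IRQF_SHARED, qname, msix_entry);

The name appears in /proc/interrupts, so the default, RSS and exception vectors can be told apart.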
 static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
...
@@ -242,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
 	}
 }
 
+static void iwl_pcie_restock_bd(struct iwl_trans *trans,
+				struct iwl_rxq *rxq,
+				struct iwl_rx_mem_buffer *rxb)
+{
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
+		struct iwl_rx_transfer_desc *bd = rxq->bd;
+
+		bd[rxq->write].type_n_size =
+			cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) |
+			((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK));
+		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
+		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
+	} else {
+		__le64 *bd = rxq->bd;
+
+		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+	}
+}
+
 /*
  * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
  */
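The legacy branch can fold the buffer id into the address because RB pages are 4 KiB aligned (the WARN_ON on DMA_BIT_MASK(12) in the next hunk relies on the same fact), leaving the low 12 bits free. A standalone illustration with invented values:

    #include <stdint.h>
    #include <assert.h>

    int main(void)
    {
        uint64_t page_dma = 0x12345000;     /* 4 KiB aligned by the allocator */
        uint16_t vid = 0x2a;                /* 12-bit buffer id */
        uint64_t bd = page_dma | vid;

        assert((bd & 0xFFF) == vid);        /* HW can echo the vid back */
        assert((bd & ~0xFFFull) == page_dma);
        return 0;
    }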
@@ -263,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 	spin_lock(&rxq->lock);
 	while (rxq->free_count) {
-		__le64 *bd = (__le64 *)rxq->bd;
-
 		/* Get next free Rx buffer, remove from free list */
 		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
 				       list);
@@ -273,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
 		/* 12 first bits are expected to be empty */
 		WARN_ON(rxb->page_dma & DMA_BIT_MASK(12));
 		/* Point to Rx buffer via next RBD in circular buffer */
-		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
+		iwl_pcie_restock_bd(trans, rxq, rxb);
 		rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK;
 		rxq->free_count--;
 	}
@@ -400,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
  * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
  * allocated buffers.
  */
-static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
-				   struct iwl_rxq *rxq)
+void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
+			    struct iwl_rxq *rxq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_rx_mem_buffer *rxb;
@@ -457,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 	}
 }
 
-static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
+void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int i;
@@ -617,28 +634,44 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
 	iwl_pcie_rx_allocator(trans_pcie->trans);
 }
 
+static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
+{
+	struct iwl_rx_transfer_desc *rx_td;
+
+	if (use_rx_td)
+		return sizeof(*rx_td);
+	else
+		return trans->cfg->mq_rx_supported ? sizeof(__le64) :
+			sizeof(__le32);
+}
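So a descriptor slot is sizeof(struct iwl_rx_transfer_desc) on 22560, a __le64 on other multi-queue devices and a __le32 on single-queue ones. A throwaway user-space rendering of the same selection (the transfer-descriptor layout here is illustrative, the real one is in internal.h):

    #include <stdio.h>
    #include <stdint.h>

    /* Illustrative stand-in for struct iwl_rx_transfer_desc. */
    struct rx_transfer_desc { uint32_t type_n_size; uint16_t rbid, rsvd; uint64_t addr; };

    static size_t free_bd_size(int use_rx_td, int mq_rx_supported)
    {
        if (use_rx_td)
            return sizeof(struct rx_transfer_desc);
        return mq_rx_supported ? sizeof(uint64_t) : sizeof(uint32_t);
    }

    int main(void)
    {
        printf("22560: %zu, mq: %zu, sq: %zu\n",
               free_bd_size(1, 1), free_bd_size(0, 1), free_bd_size(0, 0));
        return 0;
    }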
 static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
 				  struct iwl_rxq *rxq)
 {
 	struct device *dev = trans->dev;
-	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
-		sizeof(__le32);
+	bool use_rx_td = (trans->cfg->device_family >=
+			  IWL_DEVICE_FAMILY_22560);
+	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
 
 	if (rxq->bd)
-		dma_free_coherent(dev, free_size * rxq->queue_size,
+		dma_free_coherent(trans->dev,
+				  free_size * rxq->queue_size,
 				  rxq->bd, rxq->bd_dma);
 	rxq->bd_dma = 0;
 	rxq->bd = NULL;
 
 	if (rxq->rb_stts)
 		dma_free_coherent(trans->dev,
+				  use_rx_td ? sizeof(__le16) :
 				  sizeof(struct iwl_rb_status),
 				  rxq->rb_stts, rxq->rb_stts_dma);
 	rxq->rb_stts_dma = 0;
 	rxq->rb_stts = NULL;
 
 	if (rxq->used_bd)
-		dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
+		dma_free_coherent(trans->dev,
+				  (use_rx_td ? sizeof(*rxq->cd) :
+				   sizeof(__le32)) * rxq->queue_size,
 				  rxq->used_bd, rxq->used_bd_dma);
 	rxq->used_bd_dma = 0;
 	rxq->used_bd = NULL;
@@ -665,8 +698,9 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct device *dev = trans->dev;
 	int i;
-	int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
-		sizeof(__le32);
+	int free_size;
+	bool use_rx_td = (trans->cfg->device_family >=
+			  IWL_DEVICE_FAMILY_22560);
 
 	spin_lock_init(&rxq->lock);
 	if (trans->cfg->mq_rx_supported)
@@ -674,6 +708,8 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	else
 		rxq->queue_size = RX_QUEUE_SIZE;
 
+	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);
+
 	/*
 	 * Allocate the circular buffer of Read Buffer Descriptors
 	 * (RBDs)
@@ -686,7 +722,9 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	if (trans->cfg->mq_rx_supported) {
 		rxq->used_bd = dma_zalloc_coherent(dev,
-						   sizeof(__le32) *
+						   (use_rx_td ?
+						   sizeof(*rxq->cd) :
+						   sizeof(__le32)) *
 						   rxq->queue_size,
 						   &rxq->used_bd_dma,
 						   GFP_KERNEL);
@@ -695,13 +733,15 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 	}
 
 	/* Allocate the driver's pointer to receive buffer status */
-	rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
+	rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ?
+					   sizeof(__le16) :
+					   sizeof(struct iwl_rb_status),
 					   &rxq->rb_stts_dma,
 					   GFP_KERNEL);
 	if (!rxq->rb_stts)
 		goto err;
 
-	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+	if (!use_rx_td)
 		return 0;
 
 	/* Allocate the driver's pointer to TR tail */
@@ -717,6 +757,11 @@ static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
 					   GFP_KERNEL);
 	if (!rxq->cr_tail)
 		goto err;
+	/*
+	 * W/A 22560 device step Z0 must be non zero bug
+	 * TODO: remove this when stop supporting Z0
+	 */
+	*rxq->cr_tail = cpu_to_le16(500);
 
 	return 0;
 
@@ -941,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 	iwl_pcie_enable_rx_wake(trans, true);
 }
 
-static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
+void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 {
 	lockdep_assert_held(&rxq->lock);
 
@@ -951,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
 	rxq->used_count = 0;
 }
 
-static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
+int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget)
 {
 	WARN_ON(1);
 	return 0;
@@ -1000,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans)
 		rxq->read = 0;
 		rxq->write = 0;
 		rxq->write_actual = 0;
-		memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
+		memset(rxq->rb_stts, 0,
+		       (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
+		       sizeof(__le16) : sizeof(struct iwl_rb_status));
 
 		iwl_pcie_rx_init_rxb_lists(rxq);
 
@@ -1249,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 		}
 
 		page_stolen |= rxcb._page_stolen;
+		if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+			break;
 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
 	}
 
@@ -1283,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
 }
 
+static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
+						  struct iwl_rxq *rxq, int i)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	struct iwl_rx_mem_buffer *rxb;
+	u16 vid;
+
+	if (!trans->cfg->mq_rx_supported) {
+		rxb = rxq->queue[i];
+		rxq->queue[i] = NULL;
+		return rxb;
+	}
+
+	/* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF;
+	else
+		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF;
+
+	if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table))
+		goto out_err;
+
+	rxb = trans_pcie->global_table[vid - 1];
+	if (rxb->invalid)
+		goto out_err;
+
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE;
+
+	rxb->invalid = true;
+
+	return rxb;
+
+out_err:
+	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
+	iwl_force_nmi(trans);
+	return NULL;
+}
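A user-space rendering of the vid checks the helper performs: a 12-bit, 1-based index into the fixed rxb table, where 0 or an out-of-range value means the HW handed back garbage (the table size below is invented):

    #include <stdint.h>
    #include <stdio.h>

    #define GLOBAL_TABLE_SIZE 512

    static void *global_table[GLOBAL_TABLE_SIZE];

    static void *lookup_rxb(uint32_t used_bd_word)
    {
        uint16_t vid = used_bd_word & 0x0FFF;   /* only 12 bits carry the vid */

        if (!vid || vid > GLOBAL_TABLE_SIZE)    /* 0 and out-of-range are bogus */
            return NULL;                        /* the driver NMIs the fw here */
        return global_table[vid - 1];           /* vid is 1-based */
    }

    int main(void)
    {
        printf("%p\n", lookup_rxb(0x1001));     /* vid 1 maps to slot 0 */
        return 0;
    }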
 /*
  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
  */
@@ -1297,7 +1385,7 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 	spin_lock(&rxq->lock);
 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
 	 * buffer that the driver may process (last buffer filled by ucode). */
-	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 	i = rxq->read;
 
 	/* W/A 9000 device step A0 wrap-around bug */
@@ -1313,30 +1401,9 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 		if (unlikely(rxq->used_count == rxq->queue_size / 2))
 			emergency = true;
 
-		if (trans->cfg->mq_rx_supported) {
-			/*
-			 * used_bd is a 32 bit but only 12 are used to retrieve
-			 * the vid
-			 */
-			u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF;
-
-			if (WARN(!vid ||
-				 vid > ARRAY_SIZE(trans_pcie->global_table),
-				 "Invalid rxb index from HW %u\n", (u32)vid)) {
-				iwl_force_nmi(trans);
-				goto out;
-			}
-			rxb = trans_pcie->global_table[vid - 1];
-			if (WARN(rxb->invalid,
-				 "Invalid rxb from HW %u\n", (u32)vid)) {
-				iwl_force_nmi(trans);
-				goto out;
-			}
-			rxb->invalid = true;
-		} else {
-			rxb = rxq->queue[i];
-			rxq->queue[i] = NULL;
-		}
+		rxb = iwl_pcie_get_rxb(trans, rxq, i);
+		if (!rxb)
+			goto out;
 
 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
 		iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency);
@@ -1378,6 +1445,9 @@ static void iwl_pcie_rx_handle(struct iwl_trans *trans, int queue)
 out:
 	/* Backtrack one entry */
 	rxq->read = i;
+	/* update cr tail with the rxq read pointer */
+	if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
+		*rxq->cr_tail = cpu_to_le16(r);
 	spin_unlock(&rxq->lock);
 
 	/*
@@ -1409,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
 }
 
-static inline void iwl_pcie_clear_irq(struct iwl_trans *trans,
-				      struct msix_entry *entry)
-{
-	/*
-	 * Before sending the interrupt the HW disables it to prevent
-	 * a nested interrupt. This is done by writing 1 to the corresponding
-	 * bit in the mask register. After handling the interrupt, it should be
-	 * re-enabled by clearing this bit. This register is defined as
-	 * write 1 clear (W1C) register, meaning that it's being clear
-	 * by writing 1 to the bit.
-	 */
-	iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry));
-}
-
 /*
  * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw
  * This interrupt handler should be used with RSS queue only.
...
@@ -84,6 +84,7 @@
 #include "iwl-scd.h"
 #include "iwl-agn-hw.h"
 #include "fw/error-dump.h"
+#include "fw/dbg.h"
 #include "internal.h"
 #include "iwl-fh.h"
@@ -1562,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 	iwl_pcie_enable_rx_wake(trans, true);
 
-	/*
-	 * Reconfigure IVAR table in case of MSIX or reset ict table in
-	 * MSI mode since HW reset erased it.
-	 * Also enables interrupts - none will happen as
-	 * the device doesn't know we're waking it up, only when
-	 * the opmode actually tells it after this call.
-	 */
-	iwl_pcie_conf_msix_hw(trans_pcie);
-	if (!trans_pcie->msix_enabled)
-		iwl_pcie_reset_ict(trans);
-	iwl_enable_interrupts(trans);
-
 	iwl_set_bit(trans, CSR_GP_CNTRL,
 		    BIT(trans->cfg->csr->flag_mac_access_req));
 	iwl_set_bit(trans, CSR_GP_CNTRL,
@@ -1591,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
 		return ret;
 	}
 
+	/*
+	 * Reconfigure IVAR table in case of MSIX or reset ict table in
+	 * MSI mode since HW reset erased it.
+	 * Also enables interrupts - none will happen as
+	 * the device doesn't know we're waking it up, only when
+	 * the opmode actually tells it after this call.
+	 */
+	iwl_pcie_conf_msix_hw(trans_pcie);
+	if (!trans_pcie->msix_enabled)
+		iwl_pcie_reset_ict(trans);
+	iwl_enable_interrupts(trans);
+
 	iwl_pcie_set_pwr(trans, false);
 
 	if (!reset) {
@@ -1708,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
 	}
 }
 
-static const char *queue_name(struct device *dev,
-			      struct iwl_trans_pcie *trans_p, int i)
-{
-	if (trans_p->shared_vec_mask) {
-		int vec = trans_p->shared_vec_mask &
-			  IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
-
-		if (i == 0)
-			return DRV_NAME ": shared IRQ";
-
-		return devm_kasprintf(dev, GFP_KERNEL,
-				      DRV_NAME ": queue %d", i + vec);
-	}
-	if (i == 0)
-		return DRV_NAME ": default queue";
-	if (i == trans_p->alloc_vecs - 1)
-		return DRV_NAME ": exception";
-
-	return devm_kasprintf(dev, GFP_KERNEL,
-			      DRV_NAME ": queue %d", i);
-}
-
 static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
 				      struct iwl_trans_pcie *trans_pcie)
 {
@@ -2265,6 +2243,22 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
 		iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
 }
 
+static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
+				       struct iwl_trans_rxq_dma_data *data)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
+	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
+		return -EINVAL;
+
+	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
+	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
+	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
+	data->fr_bd_wid = 0;
+
+	return 0;
+}
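A sketch of how an op-mode caller might consume the new op; the command the addresses end up in is assumed, not shown in this patch:

	struct iwl_trans_rxq_dma_data data;

	if (iwl_trans_pcie_rxq_dma_data(trans, queue, &data))
		return -EINVAL;
	/* data.fr_bd_cb, data.ur_bd_cb and data.urbd_stts_wrptr now hold the
	 * free ring, used ring and status write-back DMA addresses of the
	 * queue, ready to be handed to the firmware. */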
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
@@ -2545,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
 		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
 				 rxq->free_count);
 		if (rxq->rb_stts) {
+			u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans,
+								     rxq));
 			pos += scnprintf(buf + pos, bufsz - pos,
 					 "\tclosed_rb_num: %u\n",
-					 le16_to_cpu(rxq->rb_stts->closed_rb_num) &
-					 0x0FFF);
+					 r & 0x0FFF);
 		} else {
 			pos += scnprintf(buf + pos, bufsz - pos,
 					 "\tclosed_rb_num: Not Allocated\n");
@@ -2754,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 
 	spin_lock(&rxq->lock);
 
-	r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF;
+	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
 
 	for (i = rxq->read, j = 0;
 	     i != r && j < allocated_rb_nums;
@@ -2961,7 +2956,8 @@ static struct iwl_trans_dump_data
 	u32 monitor_len;
 	int i, ptr;
 	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
-			!trans->cfg->mq_rx_supported;
+			!trans->cfg->mq_rx_supported &&
+			trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
 
 	/* transport dump header */
 	len = sizeof(*dump_data);
@@ -3013,6 +3009,10 @@ static struct iwl_trans_dump_data
 	}
 
 	if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) {
+		if (!(trans->dbg_dump_mask &
+		      BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)))
+			return NULL;
+
 		dump_data = vzalloc(len);
 		if (!dump_data)
 			return NULL;
@@ -3025,22 +3025,28 @@ static struct iwl_trans_dump_data
 	}
 
 	/* CSR registers */
-	len += sizeof(*data) + IWL_CSR_TO_DUMP;
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += sizeof(*data) + IWL_CSR_TO_DUMP;
 
 	/* FH registers */
-	if (trans->cfg->gen2)
-		len += sizeof(*data) +
-		       (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2);
-	else
-		len += sizeof(*data) +
-		       (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
+		if (trans->cfg->gen2)
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND_GEN2 -
+				FH_MEM_LOWER_BOUND_GEN2);
+		else
+			len += sizeof(*data) +
+			       (FH_MEM_UPPER_BOUND -
+				FH_MEM_LOWER_BOUND);
+	}
 
 	if (dump_rbs) {
 		/* Dump RBs is supported only for pre-9000 devices (1 queue) */
 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
 		/* RBs */
-		num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num))
-			  & 0x0FFF;
+		num_rbs =
+			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
+			& 0x0FFF;
 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
 		len += num_rbs * (sizeof(*data) +
 				  sizeof(struct iwl_fw_error_dump_rb) +
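All of these sizing branches follow one pattern: a section contributes to len only if its bit is set in dbg_dump_mask. Reduced to a standalone toy with invented section sizes:

    #include <stdint.h>
    #include <stdio.h>

    enum { DUMP_CSR, DUMP_FH_REGS, DUMP_TXCMD };
    #define BIT(n) (1u << (n))

    int main(void)
    {
        uint32_t dump_mask = BIT(DUMP_CSR) | BIT(DUMP_TXCMD);
        size_t len = 0;

        if (dump_mask & BIT(DUMP_CSR))     len += 100;
        if (dump_mask & BIT(DUMP_FH_REGS)) len += 200;  /* skipped */
        if (dump_mask & BIT(DUMP_TXCMD))   len += 300;
        printf("dump len = %zu\n", len);                /* 400 */
        return 0;
    }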
@@ -3048,7 +3054,8 @@ static struct iwl_trans_dump_data
 	}
 
 	/* Paged memory for gen2 HW */
-	if (trans->cfg->gen2)
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
 		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++)
 			len += sizeof(*data) +
 			       sizeof(struct iwl_fw_error_dump_paging) +
@@ -3060,41 +3067,51 @@ static struct iwl_trans_dump_data
 	len = 0;
 	data = (void *)dump_data->data;
 
-	data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
-	txcmd = (void *)data->data;
-	spin_lock_bh(&cmdq->lock);
-	ptr = cmdq->write_ptr;
-	for (i = 0; i < cmdq->n_window; i++) {
-		u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
-		u32 caplen, cmdlen;
-
-		cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds +
-						   trans_pcie->tfd_size * ptr);
-		caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
-
-		if (cmdlen) {
-			len += sizeof(*txcmd) + caplen;
-			txcmd->cmdlen = cpu_to_le32(cmdlen);
-			txcmd->caplen = cpu_to_le32(caplen);
-			memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen);
-			txcmd = (void *)((u8 *)txcmd->data + caplen);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) {
+		u16 tfd_size = trans_pcie->tfd_size;
+
+		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
+		txcmd = (void *)data->data;
+		spin_lock_bh(&cmdq->lock);
+		ptr = cmdq->write_ptr;
+		for (i = 0; i < cmdq->n_window; i++) {
+			u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr);
+			u32 caplen, cmdlen;
+
+			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
+							   cmdq->tfds +
+							   tfd_size * ptr);
+			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
+
+			if (cmdlen) {
+				len += sizeof(*txcmd) + caplen;
+				txcmd->cmdlen = cpu_to_le32(cmdlen);
+				txcmd->caplen = cpu_to_le32(caplen);
+				memcpy(txcmd->data, cmdq->entries[idx].cmd,
+				       caplen);
+				txcmd = (void *)((u8 *)txcmd->data + caplen);
+			}
+
+			ptr = iwl_queue_dec_wrap(trans, ptr);
 		}
+		spin_unlock_bh(&cmdq->lock);
 
-		ptr = iwl_queue_dec_wrap(trans, ptr);
+		data->len = cpu_to_le32(len);
+		len += sizeof(*data);
+		data = iwl_fw_error_next_data(data);
 	}
-	spin_unlock_bh(&cmdq->lock);
-
-	data->len = cpu_to_le32(len);
-	len += sizeof(*data);
-	data = iwl_fw_error_next_data(data);
 
-	len += iwl_trans_pcie_dump_csr(trans, &data);
-	len += iwl_trans_pcie_fh_regs_dump(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
+		len += iwl_trans_pcie_dump_csr(trans, &data);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
+		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
 	if (dump_rbs)
 		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
 
 	/* Paged memory for gen2 HW */
-	if (trans->cfg->gen2) {
+	if (trans->cfg->gen2 &&
+	    trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
 		for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) {
 			struct iwl_fw_error_dump_paging *paging;
 			dma_addr_t addr =
@@ -3114,8 +3131,8 @@ static struct iwl_trans_dump_data
 			len += sizeof(*data) + sizeof(*paging) + page_len;
 		}
 	}
-
-	len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
+	if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
+		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
 
 	dump_data->len = len;
 
@@ -3210,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
 	.txq_alloc = iwl_trans_pcie_dyn_txq_alloc,
 	.txq_free = iwl_trans_pcie_dyn_txq_free,
 	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
+	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
...
@@ -52,6 +52,7 @@
 *****************************************************************************/
 #include <linux/pm_runtime.h>
 #include <net/tso.h>
+#include <linux/tcp.h>
 
 #include "iwl-debug.h"
 #include "iwl-csr.h"
@@ -364,58 +365,89 @@ static int iwl_pcie_gen2_build_amsdu(struct iwl_trans *trans,
 	return -EINVAL;
 }
 
-static
-struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
-					    struct iwl_txq *txq,
-					    struct iwl_device_cmd *dev_cmd,
-					    struct sk_buff *skb,
-					    struct iwl_cmd_meta *out_meta)
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans,
+					  struct iwl_txq *txq,
+					  struct iwl_device_cmd *dev_cmd,
+					  struct sk_buff *skb,
+					  struct iwl_cmd_meta *out_meta,
+					  int hdr_len,
+					  int tx_cmd_len)
 {
-	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
 	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
 	dma_addr_t tb_phys;
-	bool amsdu;
-	int i, len, tb1_len, tb2_len, hdr_len;
+	int len;
 	void *tb1_addr;
 
-	memset(tfd, 0, sizeof(*tfd));
+	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 
-	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
-		(*ieee80211_get_qos_ctl(hdr) &
-		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
+
+	/*
+	 * The second TB (tb1) points to the remainder of the TX command
+	 * and the 802.11 header - dword aligned size
+	 * (This calculation modifies the TX command, so do it before the
+	 * setup of the first TB)
+	 */
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
+
+	/* do not align A-MSDU to dword as the subframe header aligns it */
+
+	/* map the data for TB1 */
+	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
+	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
+		goto out_err;
+	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len);
+
+	if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
+				      len + IWL_FIRST_TB_SIZE,
+				      hdr_len, dev_cmd))
+		goto out_err;
+
+	/* building the A-MSDU might have changed this data, memcpy it now */
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
+	return tfd;
+
+out_err:
+	iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd);
+	return NULL;
+}
+
+static struct
+iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans,
+				    struct iwl_txq *txq,
+				    struct iwl_device_cmd *dev_cmd,
+				    struct sk_buff *skb,
+				    struct iwl_cmd_meta *out_meta,
+				    int hdr_len,
+				    int tx_cmd_len)
+{
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	dma_addr_t tb_phys;
+	int i, len, tb1_len, tb2_len;
+	void *tb1_addr;
 
 	tb_phys = iwl_pcie_get_first_tb_dma(txq, idx);
 
 	/* The first TB points to bi-directional DMA data */
-	if (!amsdu)
-		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
-		       IWL_FIRST_TB_SIZE);
+	memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE);
 
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);
 
-	/* there must be data left over for TB1 or this code must be changed */
-	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
-
 	/*
 	 * The second TB (tb1) points to the remainder of the TX command
 	 * and the 802.11 header - dword aligned size
 	 * (This calculation modifies the TX command, so do it before the
 	 * setup of the first TB)
 	 */
-	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
-		len = sizeof(struct iwl_tx_cmd_gen2);
-	else
-		len = sizeof(struct iwl_tx_cmd_gen3);
-
-	len += sizeof(struct iwl_cmd_header) +
-	       ieee80211_hdrlen(hdr->frame_control) -
-	       IWL_FIRST_TB_SIZE;
+	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
+	      IWL_FIRST_TB_SIZE;
 
-	/* do not align A-MSDU to dword as the subframe header aligns it */
-	if (amsdu)
-		tb1_len = len;
-	else
-		tb1_len = ALIGN(len, 4);
+	tb1_len = ALIGN(len, 4);
 
 	/* map the data for TB1 */
 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
@@ -424,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 		goto out_err;
 	iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
 
-	hdr_len = ieee80211_hdrlen(hdr->frame_control);
-
-	if (amsdu) {
-		if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd,
-					      tb1_len + IWL_FIRST_TB_SIZE,
-					      hdr_len, dev_cmd))
-			goto out_err;
-
-		/*
-		 * building the A-MSDU might have changed this data, so memcpy
-		 * it now
-		 */
-		memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr,
-		       IWL_FIRST_TB_SIZE);
-		return tfd;
-	}
-
 	/* set up TFD's third entry to point to remainder of skb's head */
 	tb2_len = skb_headlen(skb) - hdr_len;
 
@@ -482,6 +497,43 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
 	return NULL;
 }
 
+static
+struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
+					    struct iwl_txq *txq,
+					    struct iwl_device_cmd *dev_cmd,
+					    struct sk_buff *skb,
+					    struct iwl_cmd_meta *out_meta)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
+	int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
+	struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx);
+	int len, hdr_len;
+	bool amsdu;
+
+	/* There must be data left over for TB1 or this code must be changed */
+	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE);
+
+	memset(tfd, 0, sizeof(*tfd));
+
+	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
+		len = sizeof(struct iwl_tx_cmd_gen2);
+	else
+		len = sizeof(struct iwl_tx_cmd_gen3);
+
+	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
+		(*ieee80211_get_qos_ctl(hdr) &
+		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
+
+	hdr_len = ieee80211_hdrlen(hdr->frame_control);
+
+	if (amsdu)
+		return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
+						    out_meta, hdr_len, len);
+
+	return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
+				      hdr_len, len);
+}
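After the split, the dispatcher keeps only the decision and each builder is straight-line code for its own case. The A-MSDU test it performs boils down to one QoS-control bit, shown standalone below (0x0080 is the A-MSDU-present bit from IEEE 802.11; the frame layout is reduced to just that field):

    #include <stdint.h>
    #include <stdio.h>

    #define QOS_CTL_A_MSDU_PRESENT 0x0080

    int main(void)
    {
        uint16_t qos_ctl = 0x0080;  /* QoS data frame with the A-MSDU bit set */
        int amsdu = !!(qos_ctl & QOS_CTL_A_MSDU_PRESENT);

        printf(amsdu ? "A-MSDU build path\n" : "normal TX build path\n");
        return 0;
    }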
 int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
 			   struct iwl_device_cmd *dev_cmd, int txq_id)
 {
...