Commit 0ad64191 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2018-07-26' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

First batch of iwlwifi patches for 4.19

* Implement 802.11ax D2.0;
* Changes in the base code to support the new 22560 devices;
* Support for the new 22560 device family;
parents bf9b608e 1a4968d1
......@@ -7,7 +7,8 @@ iwlwifi-objs += iwl-debug.o
iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o
iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o
iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o
iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o
iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o
iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o
iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o
iwlwifi-objs += iwl-trans.o
......
......@@ -63,6 +63,7 @@
static const struct iwl_base_params iwl2000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 51,
......@@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = {
static const struct iwl_base_params iwl2030_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_2x00,
.shadow_ram_support = true,
.led_compensation = 57,
......
......@@ -76,26 +76,45 @@
#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-"
#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-"
#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-"
#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-"
#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-"
#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-"
#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-"
#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-"
#define IWL_22000_HR_MODULE_FIRMWARE(api) \
IWL_22000_HR_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_MODULE_FIRMWARE(api) \
IWL_22000_JF_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \
IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode"
#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \
IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode"
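/* For illustration (the real API level comes from IWL_22000_UCODE_API_MAX,
 * which is not shown in this hunk): IWL_22000_SU_Z0_MODULE_FIRMWARE(38)
 * expands to the firmware file name "iwlwifi-su-z0-38.ucode"; the other
 * *_MODULE_FIRMWARE macros build their names the same way from their
 * *_FW_PRE prefixes.
 */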
#define NVM_HW_SECTION_NUM_FAMILY_22000 10
static const struct iwl_base_params iwl_22000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
.num_of_queues = 512,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
.max_event_log_size = 512,
.shadow_reg_enable = true,
.pcie_l1_allowed = true,
};
static const struct iwl_base_params iwl_22560_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000,
.num_of_queues = 512,
.max_tfd_queue_size = 65536,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
......@@ -110,11 +129,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ),
};
#define IWL_DEVICE_22000 \
#define IWL_DEVICE_22000_COMMON \
.ucode_api_max = IWL_22000_UCODE_API_MAX, \
.ucode_api_min = IWL_22000_UCODE_API_MIN, \
.device_family = IWL_DEVICE_FAMILY_22000, \
.base_params = &iwl_22000_base_params, \
.led_mode = IWL_LED_RF_STATE, \
.nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \
.non_shared_ant = ANT_A, \
......@@ -129,6 +146,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.mq_rx_supported = true, \
.vht_mu_mimo_supported = true, \
.mac_addr_from_csr = true, \
.ht_params = &iwl_22000_ht_params, \
.nvm_ver = IWL_22000_NVM_VERSION, \
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \
.use_tfh = true, \
.rf_id = true, \
.gen2 = true, \
......@@ -136,86 +157,114 @@ static const struct iwl_ht_params iwl_22000_ht_params = {
.dbgc_supported = true, \
.min_umac_error_event_table = 0x400000
#define IWL_DEVICE_22500 \
IWL_DEVICE_22000_COMMON, \
.device_family = IWL_DEVICE_FAMILY_22000, \
.base_params = &iwl_22000_base_params, \
.csr = &iwl_csr_v1
#define IWL_DEVICE_22560 \
IWL_DEVICE_22000_COMMON, \
.device_family = IWL_DEVICE_FAMILY_22560, \
.base_params = &iwl_22560_base_params, \
.csr = &iwl_csr_v2
const struct iwl_cfg iwl22000_2ac_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
IWL_DEVICE_22000,
.csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_HR_CDB_FW_PRE,
IWL_DEVICE_22000,
.csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
IWL_DEVICE_22500,
.cdb = true,
};
const struct iwl_cfg iwl22000_2ac_cfg_jf = {
.name = "Intel(R) Dual Band Wireless AC 22000",
.fw_name_pre = IWL_22000_JF_FW_PRE,
IWL_DEVICE_22000,
.csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
IWL_DEVICE_22500,
};
const struct iwl_cfg iwl22000_2ax_cfg_hr = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_FW_PRE,
IWL_DEVICE_22000,
.csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = {
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_F0_FW_PRE,
IWL_DEVICE_22000,
.csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.fw_name_pre = IWL_22000_HR_A_F0_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_B_FW_PRE,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_JF_B0_FW_PRE,
IWL_DEVICE_22000,
.csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = {
.name = "Intel(R) Dual Band Wireless AX 22000",
.fw_name_pre = IWL_22000_HR_A0_FW_PRE,
IWL_DEVICE_22000,
.csr = &iwl_csr_v1,
.ht_params = &iwl_22000_ht_params,
.nvm_ver = IWL_22000_NVM_VERSION,
.nvm_calib_ver = IWL_22000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
IWL_DEVICE_22500,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = {
.name = "Intel(R) Dual Band Wireless AX 22560",
.fw_name_pre = IWL_22000_SU_Z0_FW_PRE,
IWL_DEVICE_22560,
.cdb = true,
/*
* This device doesn't support receiving BlockAck with a large bitmap
* so we need to restrict the size of transmitted aggregation to the
* HT size; mac80211 would otherwise pick the HE max (256) by default.
*/
.max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT,
};
MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX));
......@@ -53,6 +53,7 @@
static const struct iwl_base_params iwl5000_base_params = {
.eeprom_size = IWLAGN_EEPROM_IMG_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.pll_cfg = true,
.led_compensation = 51,
.wd_timeout = IWL_WATCHDOG_DISABLED,
......
......@@ -72,6 +72,7 @@
static const struct iwl_base_params iwl6000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 51,
......@@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = {
static const struct iwl_base_params iwl6050_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x50,
.shadow_ram_support = true,
.led_compensation = 51,
......@@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = {
static const struct iwl_base_params iwl6000_g2_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE,
.num_of_queues = IWLAGN_NUM_QUEUES,
.max_tfd_queue_size = 256,
.max_ll_items = OTP_MAX_LL_ITEMS_6x00,
.shadow_ram_support = true,
.led_compensation = 57,
......
......@@ -123,6 +123,7 @@
static const struct iwl_base_params iwl7000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
......
......@@ -104,6 +104,7 @@
static const struct iwl_base_params iwl8000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
......
......@@ -95,6 +95,7 @@
static const struct iwl_base_params iwl9000_base_params = {
.eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000,
.num_of_queues = 31,
.max_tfd_queue_size = 256,
.shadow_ram_support = true,
.led_compensation = 57,
.wd_timeout = IWL_LONG_WD_TIMEOUT,
......
......@@ -193,7 +193,8 @@ enum iwl_legacy_cmds {
FW_GET_ITEM_CMD = 0x1a,
/**
* @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2,
* @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or
* &struct iwl_tx_cmd_gen3,
* response in &struct iwl_mvm_tx_resp or
* &struct iwl_mvm_tx_resp_v3
*/
......
......@@ -82,6 +82,11 @@ enum iwl_data_path_subcmd_ids {
*/
TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2,
/**
* @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd
*/
STA_HE_CTXT_CMD = 0x7,
/**
* @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd
*/
......
......@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -279,6 +281,10 @@ enum iwl_mac_filter_flags {
MAC_FILTER_OUT_BCAST = BIT(8),
MAC_FILTER_IN_CRC32 = BIT(11),
MAC_FILTER_IN_PROBE_REQUEST = BIT(12),
/**
* @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax
*/
MAC_FILTER_IN_11AX = BIT(14),
};
/**
......@@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif {
__le32 num_recvd_beacons;
} __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */
/**
* struct iwl_he_backoff_conf - used for backoff configuration
* Per trigger-based AC (set by the MU EDCA Parameter Set info element),
* used for backoff configuration of the trigger-based TXF5..TXF8.
* The MU-TIMER is reloaded with MU_TIME each time a frame from the AC is
* sent via trigger-based TX.
* @cwmin: CW min
* @cwmax: CW max
* @aifsn: AIFSN
* AIFSN=0 means that no backoff from the specified trigger-based AC is
* allowed until the MU-TIMER reaches 0
* @mu_time: MU time in 8TU units
*/
struct iwl_he_backoff_conf {
__le16 cwmin;
__le16 cwmax;
__le16 aifsn;
__le16 mu_time;
} __packed; /* AC_QOS_DOT11AX_API_S */
#define MAX_HE_SUPP_NSS 2
#define MAX_HE_CHANNEL_BW_INDX 4
/**
* struct iwl_he_pkt_ext - QAM thresholds
* The required PPE is set via HE Capabilities IE, per Nss x BW x MCS
* The IE is organized in the following way:
* Support for Nss x BW (or RU) matrix:
* (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz)
* Each entry contains 2 QAM thresholds for 8us and 16us:
* 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES
* i.e. QAM_th1 < QAM_th2, such that if TX uses QAM_tx:
* QAM_tx < QAM_th1 --> PPE=0us
* QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us
* QAM_th2 <= QAM_tx --> PPE=16us
* @pkt_ext_qam_th: QAM thresholds
* For each Nss/Bw define 2 QAM thresholds (0..5)
* For rates below the low_th, no need for PPE
* For rates between low_th and high_th, need 8us PPE
* For rates equal to or higher than the high_th, need 16us PPE
* Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x
* (0-low_th, 1-high_th)
*/
struct iwl_he_pkt_ext {
u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2];
} __packed; /* PKT_EXT_DOT11AX_API_S */
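As a worked illustration of the threshold scheme documented above, a minimal
sketch (the helper is hypothetical, for exposition only, not driver code):

static inline u8 he_ppe_duration_us(u8 qam_tx, u8 qam_th1, u8 qam_th2)
{
	/* QAM_tx < QAM_th1 --> no packet extension needed */
	if (qam_tx < qam_th1)
		return 0;
	/* QAM_th1 <= QAM_tx < QAM_th2 --> 8us PPE */
	if (qam_tx < qam_th2)
		return 8;
	/* QAM_th2 <= QAM_tx --> 16us PPE */
	return 16;
}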
/**
* enum iwl_he_sta_ctxt_flags - HE STA context flags
* @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific
* control frames such as TRIG, NDPA, BACK)
* @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS
* color for RX filter but use MAC header
* @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation
* @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap
* of 32-bits
* @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid
* and should be used
* @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation
* is enabled according to UORA element existence
* @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing
* @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK-
* enabled AGG, i.e. both BACK and non-BACK frames in a single AGG
* @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA
* parameter set, i.e. the backoff counters for trig-based ACs
*/
enum iwl_he_sta_ctxt_flags {
STA_CTXT_HE_REF_BSSID_VALID = BIT(4),
STA_CTXT_HE_BSS_COLOR_DIS = BIT(5),
STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6),
STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7),
STA_CTXT_HE_PACKET_EXT = BIT(8),
STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9),
STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10),
STA_CTXT_HE_ACK_ENABLED = BIT(11),
STA_CTXT_HE_MU_EDCA_CW = BIT(12),
};
/**
* enum iwl_he_htc_flags - HE HTC support flags
* @IWL_HE_HTC_SUPPORT: HE-HTC support
* @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule
* support via A-control field
* @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field
* @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field
* @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field
*/
enum iwl_he_htc_flags {
IWL_HE_HTC_SUPPORT = BIT(0),
IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3),
IWL_HE_HTC_BSR_SUPP = BIT(4),
IWL_HE_HTC_OMI_SUPP = BIT(5),
IWL_HE_HTC_BQR_SUPP = BIT(6),
};
/*
* @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB
* @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB
* @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in
* response to HE MRQ and if the STA provides unsolicited HE MFB
*/
#define IWL_HE_HTC_LINK_ADAP_POS (1)
#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0)
#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS)
#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS)
/**
* struct iwl_he_sta_context_cmd - configure FW to work with HE AP
* @sta_id: STA id
* @tid_limit: max num of TIDs in TX HE-SU multi-TID agg
* 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit
* @reserved1: reserved byte for future use
* @reserved2: reserved byte for future use
* @flags: see &enum iwl_he_sta_ctxt_flags
* @ref_bssid_addr: reference BSSID used by the AP
* @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes
* @htc_flags: which features are supported in HTC
* @frag_flags: frag support in A-MSDU
* @frag_level: frag support level
* @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2)
* @frag_min_size: min frag size (except last frag)
* @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa
* @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame
* @htc_trig_based_pkt_ext: default PE in 4us units
* @frame_time_rts_th: HE duration RTS threshold, in units of 32us
* @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1
* @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1
* @reserved3: reserved byte for future use
* @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues
*/
struct iwl_he_sta_context_cmd {
u8 sta_id;
u8 tid_limit;
u8 reserved1;
u8 reserved2;
__le32 flags;
/* The below fields are set via Multiple BSSID IE */
u8 ref_bssid_addr[6];
__le16 reserved0;
/* The below fields are set via HE-capabilities IE */
__le32 htc_flags;
u8 frag_flags;
u8 frag_level;
u8 frag_max_num;
u8 frag_min_size;
/* The below fields are set via PPE thresholds element */
struct iwl_he_pkt_ext pkt_ext;
/* The below fields are set via HE-Operation IE */
u8 bss_color;
u8 htc_trig_based_pkt_ext;
__le16 frame_time_rts_th;
/* Random access parameter set (i.e. RAPS) */
u8 rand_alloc_ecwmin;
u8 rand_alloc_ecwmax;
__le16 reserved3;
/* The below fields are set via MU EDCA parameter set element */
struct iwl_he_backoff_conf trig_based_txf[AC_NUM];
} __packed; /* STA_CONTEXT_DOT11AX_API_S */
#endif /* __iwl_fw_api_mac_h__ */
......@@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general {
* @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled
* @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled
* @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled
* @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
* @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled
* @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled
* @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled
......@@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags {
NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1),
NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2),
NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3),
/**
* @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled
*/
NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4),
NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5),
NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8),
......
......@@ -314,8 +314,11 @@ enum {
IWL_RATE_MCS_8_INDEX,
IWL_RATE_MCS_9_INDEX,
IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX,
IWL_RATE_MCS_10_INDEX,
IWL_RATE_MCS_11_INDEX,
IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX,
IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1,
IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1,
IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1,
};
#define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX)
......@@ -440,8 +443,8 @@ enum {
#define RATE_LEGACY_RATE_MSK 0xff
/* Bit 10 - OFDM HE */
#define RATE_MCS_OFDM_HE_POS 10
#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS)
#define RATE_MCS_HE_POS 10
#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS)
/*
* Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz
......@@ -482,15 +485,33 @@ enum {
#define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS)
/*
* Bit 20-21: HE guard interval and LTF type.
* (0) 1xLTF+1.6us, (1) 2xLTF+0.8us,
* (2) 2xLTF+1.6us, (3) 4xLTF+3.2us
* Bit 20-21: HE LTF type and guard interval
* HE (ext) SU:
* 0 1xLTF+0.8us
* 1 2xLTF+0.8us
* 2 2xLTF+1.6us
* 3 & SGI (bit 13) clear 4xLTF+3.2us
* 3 & SGI (bit 13) set 4xLTF+0.8us
* HE MU:
* 0 4xLTF+0.8us
* 1 2xLTF+0.8us
* 2 2xLTF+1.6us
* 3 4xLTF+3.2us
* HE TRIG:
* 0 1xLTF+1.6us
* 1 2xLTF+1.6us
* 2 4xLTF+3.2us
* 3 (does not occur)
*/
#define RATE_MCS_HE_GI_LTF_POS 20
#define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS)
/* Bit 22-23: HE type. (0) SU, (1) SU_EXT, (2) MU, (3) trigger based */
#define RATE_MCS_HE_TYPE_POS 22
#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS)
#define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS)
/* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */
......@@ -501,6 +522,9 @@ enum {
#define RATE_MCS_LDPC_POS 27
#define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS)
/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */
#define RATE_MCS_HE_106T_POS 28
#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS)
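To make the encoding above concrete, a sketch of decoding the HE fields from
a rate_n_flags word (the helper names are illustrative, not driver code):

static inline bool iwl_rate_is_he(u32 rate_n_flags)
{
	return rate_n_flags & RATE_MCS_HE_MSK;
}

static inline u32 iwl_rate_he_type(u32 rate_n_flags)
{
	/* 0: SU, 1: extended SU, 2: MU, 3: trigger based */
	return (rate_n_flags & RATE_MCS_HE_TYPE_MSK) >> RATE_MCS_HE_TYPE_POS;
}

static inline u32 iwl_rate_he_gi_ltf(u32 rate_n_flags)
{
	/* interpretation depends on the HE type, see the table above */
	return (rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
		RATE_MCS_HE_GI_LTF_POS;
}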
/* Link Quality definitions */
......
......@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -343,6 +345,37 @@ enum iwl_rx_mpdu_mac_info {
IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0,
};
/*
* enum iwl_rx_he_phy - HE PHY data
*/
enum iwl_rx_he_phy {
IWL_RX_HE_PHY_BEAM_CHNG = BIT(0),
IWL_RX_HE_PHY_UPLINK = BIT(1),
IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc,
IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00,
IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12),
IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000,
IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20),
IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000,
IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23),
IWL_RX_HE_PHY_DOPPLER = BIT(24),
/* 6 bits reserved */
IWL_RX_HE_PHY_DELIM_EOF = BIT(31),
/* second dword - MU data */
IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0),
IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL,
IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL,
IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8),
/* trigger encoded */
IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL,
IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL,
/* 1 bit reserved */
IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21),
IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL,
/* 8 bits reserved */
};
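A minimal sketch of reading these fields out of a received he_phy_data word
(illustrative only; it assumes FIELD_GET from linux/bitfield.h, and the
helper names are hypothetical, not driver code):

#include <linux/bitfield.h>

static inline u8 iwl_he_phy_bss_color(__le64 he_phy_data)
{
	/* BSS color lives in bits 2-7 of the first dword */
	return FIELD_GET(IWL_RX_HE_PHY_BSS_COLOR_MASK,
			 le64_to_cpu(he_phy_data));
}

static inline u32 iwl_he_phy_txop_dur(__le64 he_phy_data)
{
	return FIELD_GET(IWL_RX_HE_PHY_TXOP_DUR_MASK,
			 le64_to_cpu(he_phy_data));
}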
/**
* struct iwl_rx_mpdu_desc - RX MPDU descriptor
*/
......@@ -438,12 +471,20 @@ struct iwl_rx_mpdu_desc {
*/
__le32 gp2_on_air_rise;
/* DW12 & DW13 */
union {
/**
* @tsf_on_air_rise:
* TSF value on air rise (INA), only valid if
* %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set
*/
__le64 tsf_on_air_rise;
/**
* @he_phy_data:
* HE PHY data, see &enum iwl_rx_he_phy, valid
* only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set
*/
__le64 he_phy_data;
};
} __packed;
struct iwl_frame_release {
......
......@@ -7,6 +7,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -28,6 +29,7 @@
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 {
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_7 */
/**
* struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices
* ( TX_CMD = 0x1c )
* @len: in bytes of the payload, see below for details
* @flags: combination of &enum iwl_tx_cmd_flags
* @offload_assist: TX offload configuration
* @dram_info: FW internal DRAM storage
* @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is
* cleared. Combination of RATE_MCS_*
* @ttl: time to live - packet lifetime limit; the FW should drop the
*	frame once this time has passed.
* @hdr: 802.11 header
*/
struct iwl_tx_cmd_gen3 {
__le16 len;
__le16 flags;
__le32 offload_assist;
struct iwl_dram_sec_info dram_info;
__le32 rate_n_flags;
__le64 ttl;
struct ieee80211_hdr hdr[0];
} __packed; /* TX_CMD_API_S_VER_8 */
/*
* TX response related data
*/
......
......@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt)
return;
pkt = cmd.resp_pkt;
if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
iwl_parse_shared_mem_22000(fwrt, pkt);
else
iwl_parse_shared_mem(fwrt, pkt);
......
......@@ -93,6 +93,7 @@ enum iwl_device_family {
IWL_DEVICE_FAMILY_8000,
IWL_DEVICE_FAMILY_9000,
IWL_DEVICE_FAMILY_22000,
IWL_DEVICE_FAMILY_22560,
};
/*
......@@ -176,6 +177,7 @@ static inline u8 num_of_ant(u8 mask)
* @apmg_wake_up_wa: should the MAC access REQ be asserted when a command
* is in flight. This is due to a HW bug in 7260, 3160 and 7265.
* @scd_chain_ext_wa: should the chain extension feature in SCD be disabled.
* @max_tfd_queue_size: max number of entries in tfd queue.
*/
struct iwl_base_params {
unsigned int wd_timeout;
......@@ -191,6 +193,7 @@ struct iwl_base_params {
scd_chain_ext_wa:1;
u16 num_of_queues; /* def: HW dependent */
u32 max_tfd_queue_size; /* def: HW dependent */
u8 max_ll_items;
u8 led_compensation;
......@@ -566,9 +569,11 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr;
extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb;
extern const struct iwl_cfg iwl22000_2ac_cfg_jf;
extern const struct iwl_cfg iwl22000_2ax_cfg_hr;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0;
extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0;
extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb;
#endif /* CONFIG_IWLMVM */
#endif /* __IWL_CONFIG_H__ */
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#ifndef __iwl_context_info_file_gen3_h__
#define __iwl_context_info_file_gen3_h__
#include "iwl-context-info.h"
#define CSR_CTXT_INFO_BOOT_CTRL 0x0
#define CSR_CTXT_INFO_ADDR 0x118
#define CSR_IML_DATA_ADDR 0x120
#define CSR_IML_SIZE_ADDR 0x128
#define CSR_IML_RESP_ADDR 0x12c
/* Set bit for enabling automatic function boot */
#define CSR_AUTO_FUNC_BOOT_ENA BIT(1)
/* Set bit for initiating function boot */
#define CSR_AUTO_FUNC_INIT BIT(7)
/**
* enum iwl_prph_scratch_mtr_format - tfd size configuration
* @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd
* @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd
* @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd
* @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd
*/
enum iwl_prph_scratch_mtr_format {
IWL_PRPH_MTR_FORMAT_16B = 0x0,
IWL_PRPH_MTR_FORMAT_32B = 0x40000,
IWL_PRPH_MTR_FORMAT_64B = 0x80000,
IWL_PRPH_MTR_FORMAT_256B = 0xC0000,
};
/**
* enum iwl_prph_scratch_flags - PRPH scratch control flags
* @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf
* @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated
* in hwm config.
* @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM
* @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for
* multicomm.
* @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW
* @IWL_PRPH_SCRATCH_RB_SIZE_4K: use 4K RB size (the default is 2K)
* @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for
* completion descriptor, 1: for responses (legacy)
* @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the TFD.
* There are 4 possible values: 0: 16 bit, 1: 32 bit, 2: 64 bit,
* 3: 256 bit.
*/
enum iwl_prph_scratch_flags {
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4),
IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8),
IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9),
IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10),
IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11),
IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16),
IWL_PRPH_SCRATCH_MTR_MODE = BIT(17),
IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19),
};
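Combining the two enums, a hedged sketch of how an init path might compose
the control word for a 22560-style setup with 4K RBs and 256-bit TFDs
(illustrative only; the code that actually programs this is not part of
this diff):

u32 control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
		    IWL_PRPH_SCRATCH_MTR_MODE |
		    IWL_PRPH_MTR_FORMAT_256B;

prph_scratch->ctrl_cfg.control.control_flags = cpu_to_le32(control_flags);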
/*
* struct iwl_prph_scratch_version - version structure
* @mac_id: SKU and revision id
* @version: prph scratch information version id
* @size: the size of the context information in DWs
* @reserved: reserved
*/
struct iwl_prph_scratch_version {
__le16 mac_id;
__le16 version;
__le16 size;
__le16 reserved;
} __packed; /* PERIPH_SCRATCH_VERSION_S */
/*
* struct iwl_prph_scratch_control - control structure
* @control_flags: context information flags see &enum iwl_prph_scratch_flags
* @reserved: reserved
*/
struct iwl_prph_scratch_control {
__le32 control_flags;
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_CONTROL_S */
/*
* struct iwl_prph_scratch_ror_cfg - ror config
* @ror_base_addr: ror start address
* @ror_size: ror size in DWs
* @reserved: reserved
*/
struct iwl_prph_scratch_ror_cfg {
__le64 ror_base_addr;
__le32 ror_size;
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */
/*
* struct iwl_prph_scratch_hwm_cfg - hwm config
* @hwm_base_addr: hwm start address
* @hwm_size: hwm size in DWs
* @reserved: reserved
*/
struct iwl_prph_scratch_hwm_cfg {
__le64 hwm_base_addr;
__le32 hwm_size;
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */
/*
* struct iwl_prph_scratch_rbd_cfg - RBDs configuration
* @free_rbd_addr: default queue free RB CB base address
* @reserved: reserved
*/
struct iwl_prph_scratch_rbd_cfg {
__le64 free_rbd_addr;
__le32 reserved;
} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */
/*
* struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config
* @version: version information of context info and HW
* @control: control flags of FH configurations
* @ror_cfg: ror configuration
* @hwm_cfg: hwm configuration
* @rbd_cfg: default RX queue configuration
*/
struct iwl_prph_scratch_ctrl_cfg {
struct iwl_prph_scratch_version version;
struct iwl_prph_scratch_control control;
struct iwl_prph_scratch_ror_cfg ror_cfg;
struct iwl_prph_scratch_hwm_cfg hwm_cfg;
struct iwl_prph_scratch_rbd_cfg rbd_cfg;
} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */
/*
* struct iwl_prph_scratch - peripheral scratch mapping
* @ctrl_cfg: control and configuration of prph scratch
* @dram: firmware images addresses in DRAM
* @reserved: reserved
*/
struct iwl_prph_scratch {
struct iwl_prph_scratch_ctrl_cfg ctrl_cfg;
__le32 reserved[16];
struct iwl_context_info_dram dram;
} __packed; /* PERIPH_SCRATCH_S */
/*
* struct iwl_prph_info - peripheral information
* @boot_stage_mirror: reflects the value in the Boot Stage CSR register
* @ipc_status_mirror: reflects the value in the IPC Status CSR register
* @sleep_notif: indicates the peripheral sleep status
* @reserved: reserved
*/
struct iwl_prph_info {
__le32 boot_stage_mirror;
__le32 ipc_status_mirror;
__le32 sleep_notif;
__le32 reserved;
} __packed; /* PERIPH_INFO_S */
/*
* struct iwl_context_info_gen3 - device INIT configuration
* @version: version of the context information
* @size: size of context information in DWs
* @config: context in which the peripheral would execute - a subset of
* capability csr register published by the peripheral
* @prph_info_base_addr: the peripheral information structure start address
* @cr_head_idx_arr_base_addr: the completion ring head index array
* start address
* @tr_tail_idx_arr_base_addr: the transfer ring tail index array
* start address
* @cr_tail_idx_arr_base_addr: the completion ring tail index array
* start address
* @tr_head_idx_arr_base_addr: the transfer ring head index array
* start address
* @cr_idx_arr_size: number of entries in the completion ring index array
* @tr_idx_arr_size: number of entries in the transfer ring index array
* @mtr_base_addr: the message transfer ring start address
* @mcr_base_addr: the message completion ring start address
* @mtr_size: number of entries which the message transfer ring can hold
* @mcr_size: number of entries which the message completion ring can hold
* @mtr_doorbell_vec: the doorbell vector associated with the message
* transfer ring
* @mcr_doorbell_vec: the doorbell vector associated with the message
* completion ring
* @mtr_msi_vec: the MSI which shall be generated by the peripheral after
* completing a transfer descriptor in the message transfer ring
* @mcr_msi_vec: the MSI which shall be generated by the peripheral after
* completing a completion descriptor in the message completion ring
* @mtr_opt_header_size: the size of the optional header in the transfer
* descriptor associated with the message transfer ring in DWs
* @mtr_opt_footer_size: the size of the optional footer in the transfer
* descriptor associated with the message transfer ring in DWs
* @mcr_opt_header_size: the size of the optional header in the completion
* descriptor associated with the message completion ring in DWs
* @mcr_opt_footer_size: the size of the optional footer in the completion
* descriptor associated with the message completion ring in DWs
* @msg_rings_ctrl_flags: message rings control flags
* @prph_info_msi_vec: the MSI which shall be generated by the peripheral
* after updating the Peripheral Information structure
* @prph_scratch_base_addr: the peripheral scratch structure start address
* @prph_scratch_size: the size of the peripheral scratch structure in DWs
* @reserved: reserved
*/
struct iwl_context_info_gen3 {
__le16 version;
__le16 size;
__le32 config;
__le64 prph_info_base_addr;
__le64 cr_head_idx_arr_base_addr;
__le64 tr_tail_idx_arr_base_addr;
__le64 cr_tail_idx_arr_base_addr;
__le64 tr_head_idx_arr_base_addr;
__le16 cr_idx_arr_size;
__le16 tr_idx_arr_size;
__le64 mtr_base_addr;
__le64 mcr_base_addr;
__le16 mtr_size;
__le16 mcr_size;
__le16 mtr_doorbell_vec;
__le16 mcr_doorbell_vec;
__le16 mtr_msi_vec;
__le16 mcr_msi_vec;
u8 mtr_opt_header_size;
u8 mtr_opt_footer_size;
u8 mcr_opt_header_size;
u8 mcr_opt_footer_size;
__le16 msg_rings_ctrl_flags;
__le16 prph_info_msi_vec;
__le64 prph_scratch_base_addr;
__le32 prph_scratch_size;
__le32 reserved;
} __packed; /* IPC_CONTEXT_INFO_S */
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw);
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans);
#endif /* __iwl_context_info_file_gen3_h__ */
......@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -199,5 +201,8 @@ struct iwl_context_info {
int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw);
void iwl_pcie_ctxt_info_free(struct iwl_trans *trans);
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans);
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
struct iwl_context_info_dram *ctxt_dram);
#endif /* __iwl_context_info_file_h__ */
......@@ -339,6 +339,9 @@ enum {
/* HW_RF CHIP ID */
#define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF)
/* HW_RF CHIP STEP */
#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF)
/* EEPROM REG */
#define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001)
#define CSR_EEPROM_REG_BIT_CMD (0x00000002)
......@@ -592,6 +595,8 @@ enum msix_fh_int_causes {
enum msix_hw_int_causes {
MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0),
MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1),
MSIX_HW_INT_CAUSES_REG_IPC = BIT(1),
MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5),
MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6),
MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7),
MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8),
......
......@@ -1787,7 +1787,8 @@ MODULE_PARM_DESC(11n_disable,
"disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX");
module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444);
MODULE_PARM_DESC(amsdu_size,
"amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)");
"amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, "
"4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)");
module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444);
MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)");
......@@ -1856,3 +1857,7 @@ module_param_named(remove_when_gone,
0444);
MODULE_PARM_DESC(remove_when_gone,
"Remove dev from PCIe bus if it is deemed inaccessible (default: false)");
module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool,
S_IRUGO);
MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)");
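With this parameter, HE support can be toggled at module load time, e.g.
with an invocation such as modprobe iwlwifi disable_11ax=1 (illustrative
usage).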
......@@ -7,6 +7,7 @@
*
* Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
* along with this program;
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
......@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 Intel Mobile Communications GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING;
if ((cfg->mq_rx_supported &&
iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) ||
iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) ||
iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K)
ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU;
......
......@@ -7,6 +7,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -18,9 +19,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
* along with this program.
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
......@@ -33,6 +32,7 @@
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -435,12 +435,14 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans,
* Once the RXF-to-DRAM DMA is active, this flag is immediately turned off.
*/
#define RFH_GEN_STATUS 0xA09808
#define RFH_GEN_STATUS_GEN3 0xA07824
#define RBD_FETCH_IDLE BIT(29)
#define SRAM_DMA_IDLE BIT(30)
#define RXF_DMA_IDLE BIT(31)
/* DMA configuration */
#define RFH_RXF_DMA_CFG 0xA09820
#define RFH_RXF_DMA_CFG_GEN3 0xA07880
/* RB size */
#define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */
#define RFH_RXF_DMA_RB_SIZE_POS 16
......@@ -643,10 +645,13 @@ struct iwl_rb_status {
#define TFD_QUEUE_SIZE_MAX (256)
#define TFD_QUEUE_SIZE_MAX_GEN3 (65536)
/* cb size is the exponent - 3 */
#define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3)
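/* e.g. TFD_QUEUE_CB_SIZE(256) = ilog2(256) - 3 = 5 for the default queues,
 * and TFD_QUEUE_CB_SIZE(65536) = 16 - 3 = 13 for the gen3 queue size
 */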
#define TFD_QUEUE_SIZE_BC_DUP (64)
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \
TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
......@@ -753,7 +758,7 @@ struct iwl_tfh_tfd {
* For devices up to 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
* For 22000 and on:
* For 22000:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
......@@ -762,4 +767,15 @@ struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
} __packed;
/**
* struct iwl_gen3_bc_tbl - scheduler byte count table gen3
* For 22560 and on:
* @tfd_offset: 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
struct iwl_gen3_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3];
} __packed;
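Following the field layout in the comment above, a sketch of packing one
entry (this assumes the documented bit positions resolve to bits 0-11 for
the byte count and 12-13 for the chunk count; the helper is hypothetical,
and the real driver may pack the gen3 table differently):

static inline __le16 iwl_gen3_bc_entry(u16 byte_cnt, u16 num_64b_chunks)
{
	/* bits 0-11: tx command byte count, bits 12-13: 64-byte chunks */
	return cpu_to_le16((byte_cnt & 0xfff) | (num_64b_chunks << 12));
}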
#endif /* !__iwl_fh_h__ */
......@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -17,9 +18,7 @@
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
* along with this program;
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
......@@ -31,6 +30,7 @@
* BSD LICENSE
*
* Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -90,6 +90,8 @@ enum iwl_amsdu_size {
IWL_AMSDU_4K = 1,
IWL_AMSDU_8K = 2,
IWL_AMSDU_12K = 3,
/* Add 2K at the end to avoid breaking current API */
IWL_AMSDU_2K = 4,
};
enum iwl_uapsd_disable {
......@@ -144,6 +146,10 @@ struct iwl_mod_params {
bool lar_disable;
bool fw_monitor;
bool disable_11ac;
/**
* @disable_11ax: disable HE capabilities, default = false
*/
bool disable_11ax;
bool remove_when_gone;
};
......
......@@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
else
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
case IWL_AMSDU_2K:
if (cfg->mq_rx_supported)
vht_cap->cap |=
IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454;
else
WARN(1, "RB size of 2K is not supported by this device\n");
break;
case IWL_AMSDU_4K:
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895;
break;
......@@ -463,6 +470,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map;
}
static struct ieee80211_sband_iftype_data iwl_he_capa = {
.types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP),
.he_cap = {
.has_he = true,
.he_cap_elem = {
.mac_cap_info[0] =
IEEE80211_HE_MAC_CAP0_HTC_HE,
.mac_cap_info[1] =
IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US |
IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8,
.mac_cap_info[2] =
IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP |
IEEE80211_HE_MAC_CAP2_ACK_EN,
.mac_cap_info[3] =
IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU |
IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2,
.mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU,
.phy_cap_info[0] =
IEEE80211_HE_PHY_CAP0_DUAL_BAND |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G |
IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G,
.phy_cap_info[1] =
IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A |
IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD |
IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS,
.phy_cap_info[2] =
IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US |
IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ |
IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ,
.phy_cap_info[3] =
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 |
IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK |
IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1,
.phy_cap_info[4] =
IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 |
IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8,
.phy_cap_info[5] =
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 |
IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2,
.phy_cap_info[6] =
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT,
.phy_cap_info[7] =
IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR |
IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP7_MAX_NC_7,
.phy_cap_info[8] =
IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G |
IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU |
IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU,
},
/*
* Set default Tx/Rx HE MCS NSS Support field. Indicate support
* for up to 2 spatial streams and all MCS, without any special
* cases
*/
.he_mcs_nss_supp = {
.rx_mcs_80 = cpu_to_le16(0xfffa),
.tx_mcs_80 = cpu_to_le16(0xfffa),
.rx_mcs_160 = cpu_to_le16(0xfffa),
.tx_mcs_160 = cpu_to_le16(0xfffa),
.rx_mcs_80p80 = cpu_to_le16(0xffff),
.tx_mcs_80p80 = cpu_to_le16(0xffff),
},
/*
* Set default PPE thresholds, with PPET16 set to 0, PPET8 set
* to 7
*/
.ppe_thres = {0x61, 0x1c, 0xc7, 0x71},
},
};
static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband,
u8 tx_chains, u8 rx_chains)
{
if (sband->band == NL80211_BAND_2GHZ ||
sband->band == NL80211_BAND_5GHZ)
sband->iftype_data = &iwl_he_capa;
else
return;
sband->n_iftype_data = 1;
/* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */
if ((tx_chains & rx_chains) != ANT_AB) {
iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &=
~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS;
iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &=
~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS;
}
}
static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
const __le16 *nvm_ch_flags, u8 tx_chains,
......@@ -483,6 +585,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
sband = &data->bands[NL80211_BAND_5GHZ];
sband->band = NL80211_BAND_5GHZ;
sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS];
......@@ -495,6 +600,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax)
iwl_init_he_hw_capab(sband, tx_chains, rx_chains);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
n_used, n_channels);
......@@ -1293,6 +1401,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans,
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED);
nvm->sku_cap_11n_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED);
nvm->sku_cap_11ax_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED);
nvm->sku_cap_band_24ghz_enable =
!!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED);
nvm->sku_cap_band_52ghz_enable =
......
......@@ -350,6 +350,8 @@ static inline int
iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
{
switch (rb_size) {
case IWL_AMSDU_2K:
return get_order(2 * 1024);
case IWL_AMSDU_4K:
return get_order(4 * 1024);
case IWL_AMSDU_8K:
......
......@@ -301,7 +301,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
if (ret) {
struct iwl_trans *trans = mvm->trans;
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000)
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000)
IWL_ERR(mvm,
"SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n",
iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS),
......@@ -1009,7 +1009,7 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
/* Init RSS configuration */
/* TODO - remove 22000 disablement when we have RXQ config API */
if (iwl_mvm_has_new_rx_api(mvm) &&
mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) {
mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22000) {
ret = iwl_send_rss_cfg_cmd(mvm);
if (ret) {
IWL_ERR(mvm, "Failed to configure RSS queues: %d\n",
......
......@@ -780,6 +780,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm,
if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST);
if (vif->bss_conf.assoc && vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX);
return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd);
}
......
......@@ -36,6 +36,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
enum ieee80211_ampdu_mlme_action action = params->action;
u16 tid = params->tid;
u16 *ssn = &params->ssn;
u8 buf_size = params->buf_size;
u16 buf_size = params->buf_size;
bool amsdu = params->amsdu;
u16 timeout = params->timeout;
......@@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm,
iwl_mvm_mu_mimo_iface_iterator, notif);
}
static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit)
{
u8 byte_num = ppe_pos_bit / 8;
u8 bit_num = ppe_pos_bit % 8;
u8 residue_bits;
u8 res;
if (bit_num <= 5)
return (ppe[byte_num] >> bit_num) &
(BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1);
/*
* If bit_num > 5, we have to combine bits with next byte.
* Calculate how many bits we need to take from current byte (called
* here "residue_bits"), and add them to bits from next byte.
*/
residue_bits = 8 - bit_num;
res = (ppe[byte_num + 1] &
(BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) <<
residue_bits;
res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1);
return res;
}
static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm,
struct ieee80211_vif *vif, u8 sta_id)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct iwl_he_sta_context_cmd sta_ctxt_cmd = {
.sta_id = sta_id,
.tid_limit = IWL_MAX_TID_COUNT,
.bss_color = vif->bss_conf.bss_color,
.htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext,
.frame_time_rts_th =
cpu_to_le16(vif->bss_conf.frame_time_rts_th),
};
struct ieee80211_sta *sta;
u32 flags;
int i;
rcu_read_lock();
sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]);
if (IS_ERR(sta)) {
rcu_read_unlock();
WARN(1, "Can't find STA to configure HE\n");
return;
}
if (!sta->he_cap.has_he) {
rcu_read_unlock();
return;
}
flags = 0;
/* HTC flags */
if (sta->he_cap.he_cap_elem.mac_cap_info[0] &
IEEE80211_HE_MAC_CAP0_HTC_HE)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT);
if ((sta->he_cap.he_cap_elem.mac_cap_info[1] &
IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) ||
(sta->he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) {
u8 link_adap =
((sta->he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) +
(sta->he_cap.he_cap_elem.mac_cap_info[1] &
IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION);
if (link_adap == 2)
sta_ctxt_cmd.htc_flags |=
cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED);
else if (link_adap == 3)
sta_ctxt_cmd.htc_flags |=
cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH);
}
if (sta->he_cap.he_cap_elem.mac_cap_info[2] &
IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED)
sta_ctxt_cmd.htc_flags |=
cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED);
if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP);
if (sta->he_cap.he_cap_elem.mac_cap_info[3] &
IEEE80211_HE_MAC_CAP3_OMI_CONTROL)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP);
if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR)
sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP);
/* If PPE Thresholds exist, parse them into a FW-familiar format */
if (sta->he_cap.he_cap_elem.phy_cap_info[6] &
IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) {
u8 nss = (sta->he_cap.ppe_thres[0] &
IEEE80211_PPE_THRES_NSS_MASK) + 1;
u8 ru_index_bitmap =
(sta->he_cap.ppe_thres[0] &
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >>
IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS;
u8 *ppe = &sta->he_cap.ppe_thres[0];
u8 ppe_pos_bit = 7; /* Starting after PPE header */
/*
* FW currently supports only nss == MAX_HE_SUPP_NSS
*
* If nss > MAX: we can ignore values we don't support
* If nss < MAX: we can set zeros in other streams
*/
if (nss > MAX_HE_SUPP_NSS) {
IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss,
MAX_HE_SUPP_NSS);
nss = MAX_HE_SUPP_NSS;
}
for (i = 0; i < nss; i++) {
u8 ru_index_tmp = ru_index_bitmap << 1;
u8 bw;
for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) {
ru_index_tmp >>= 1;
if (!(ru_index_tmp & 1))
continue;
sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] =
iwl_mvm_he_get_ppe_val(ppe,
ppe_pos_bit);
ppe_pos_bit +=
IEEE80211_PPE_THRES_INFO_PPET_SIZE;
sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] =
iwl_mvm_he_get_ppe_val(ppe,
ppe_pos_bit);
ppe_pos_bit +=
IEEE80211_PPE_THRES_INFO_PPET_SIZE;
}
}
flags |= STA_CTXT_HE_PACKET_EXT;
}
rcu_read_unlock();
/* Mark MU EDCA as enabled, unless it's missing on some AC */
flags |= STA_CTXT_HE_MU_EDCA_CW;
for (i = 0; i < AC_NUM; i++) {
struct ieee80211_he_mu_edca_param_ac_rec *mu_edca =
&mvmvif->queue_params[i].mu_edca_param_rec;
if (!mvmvif->queue_params[i].mu_edca) {
flags &= ~STA_CTXT_HE_MU_EDCA_CW;
break;
}
sta_ctxt_cmd.trig_based_txf[i].cwmin =
cpu_to_le16(mu_edca->ecw_min_max & 0xf);
sta_ctxt_cmd.trig_based_txf[i].cwmax =
cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4);
sta_ctxt_cmd.trig_based_txf[i].aifsn =
cpu_to_le16(mu_edca->aifsn);
sta_ctxt_cmd.trig_based_txf[i].mu_time =
cpu_to_le16(mu_edca->mu_edca_timer);
}
if (vif->bss_conf.multi_sta_back_32bit)
flags |= STA_CTXT_HE_32BIT_BA_BITMAP;
if (vif->bss_conf.ack_enabled)
flags |= STA_CTXT_HE_ACK_ENABLED;
if (vif->bss_conf.uora_exists) {
flags |= STA_CTXT_HE_TRIG_RND_ALLOC;
sta_ctxt_cmd.rand_alloc_ecwmin =
vif->bss_conf.uora_ocw_range & 0x7;
sta_ctxt_cmd.rand_alloc_ecwmax =
(vif->bss_conf.uora_ocw_range >> 3) & 0x7;
}
/* TODO: support Multi BSSID IE */
sta_ctxt_cmd.flags = cpu_to_le32(flags);
if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD,
DATA_PATH_GROUP, 0),
0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd))
IWL_ERR(mvm, "Failed to configure FW for HE!\n");
}
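For reference, the cwmin/cwmax fields filled in above carry ECW exponents, not contention-window sizes: mu_edca->ecw_min_max packs ECWmin in the low nibble and ECWmax in the high nibble, and 802.11 defines CW = 2^ECW - 1 (the expansion is presumably left to the firmware). A minimal sketch with a hypothetical encoding:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint8_t ecw_min_max = 0xa4;	/* hypothetical: ECWmin = 4, ECWmax = 10 */
	uint16_t ecwmin = ecw_min_max & 0xf;
	uint16_t ecwmax = (ecw_min_max & 0xf0) >> 4;

	printf("ECWmin=%u (CWmin=%u) ECWmax=%u (CWmax=%u)\n",
	       ecwmin, (1 << ecwmin) - 1, ecwmax, (1 << ecwmax) - 1);
	return 0;
}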
static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct ieee80211_bss_conf *bss_conf,
......@@ -1910,8 +2099,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
* beacon interval, which was not known when the station interface was
* added.
*/
if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc)
if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) {
if (vif->bss_conf.he_support &&
!iwlwifi_mod_params.disable_11ax)
iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id);
iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif);
}
/*
* If we're not associated yet, take the (new) BSSID before associating
......@@ -4365,7 +4559,7 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm,
mvm->trans->num_rx_queues);
/* TODO - remove this when we have RXQ config API */
if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) {
if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) {
qmask = BIT(0);
if (notif->sync)
atomic_set(&mvm->queue_sync_counter, 1);
......
......@@ -654,7 +654,7 @@ struct iwl_mvm_tcm {
struct iwl_mvm_reorder_buffer {
u16 head_sn;
u16 num_stored;
u8 buf_size;
u16 buf_size;
int queue;
u16 last_amsdu;
u8 last_sub_index;
......
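The u8 -> u16 widening of buf_size here (and in the aggregation paths later in this series) is needed because HE block-ack agreements allow a 256-frame reorder window, which does not fit in a u8. A one-liner shows the truncation the old type would cause:

#include <stdio.h>

int main(void)
{
	unsigned char old_buf_size = 256;	/* u8: wraps to 0 */
	unsigned short new_buf_size = 256;	/* u16: fits */

	printf("u8: %u, u16: %u\n", old_buf_size, new_buf_size);
	return 0;
}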
......@@ -448,6 +448,7 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = {
HCMD_NAME(DQA_ENABLE_CMD),
HCMD_NAME(UPDATE_MU_GROUPS_CMD),
HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD),
HCMD_NAME(STA_HE_CTXT_CMD),
HCMD_NAME(STA_PM_NOTIF),
HCMD_NAME(MU_GROUP_MGMT_NOTIF),
HCMD_NAME(RX_QUEUES_NOTIFICATION),
......@@ -703,11 +704,15 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
}
/* the hardware splits the A-MSDU */
if (mvm->cfg->mq_rx_supported)
if (mvm->trans->cfg->device_family >=
IWL_DEVICE_FAMILY_22560)
trans_cfg.rx_buf_size = IWL_AMSDU_2K;
else if (mvm->cfg->mq_rx_supported)
trans_cfg.rx_buf_size = IWL_AMSDU_4K;
trans->wide_cmd_header = true;
trans_cfg.bc_table_dword = true;
trans_cfg.bc_table_dword =
mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560;
trans_cfg.command_groups = iwl_mvm_groups;
trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups);
......
......@@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta,
}
}
static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs)
{
switch (mcs) {
case IEEE80211_HE_MCS_SUPPORT_0_7:
return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1;
case IEEE80211_HE_MCS_SUPPORT_0_9:
return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1;
case IEEE80211_HE_MCS_SUPPORT_0_11:
return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1;
case IEEE80211_HE_MCS_NOT_SUPPORTED:
return 0;
}
WARN(1, "invalid HE MCS %d\n", mcs);
return 0;
}
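The translation above turns an 802.11 "highest supported MCS" code into a contiguous rate bitmap; assuming the IWL_TLC_MNG_HT_RATE_MCSn constants equal n, BIT(n + 1) - 1 sets bits 0..n. A quick standalone check:

#include <stdio.h>

#define BIT(n) (1u << (n))

int main(void)
{
	printf("0-7:  0x%04x\n", BIT(7 + 1) - 1);	/* 0x00ff */
	printf("0-9:  0x%04x\n", BIT(9 + 1) - 1);	/* 0x03ff */
	printf("0-11: 0x%04x\n", BIT(11 + 1) - 1);	/* 0x0fff */
	return 0;
}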
static void
rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta,
const struct ieee80211_sta_he_cap *he_cap,
struct iwl_tlc_config_cmd *cmd)
{
u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160);
u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80);
int i;
for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) {
u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3;
u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3;
cmd->ht_rates[i][0] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80));
cmd->ht_rates[i][1] =
cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160));
}
}
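The rx_mcs_80/rx_mcs_160 maps read above pack one 2-bit support code per spatial stream (stream i at bits [2i+1:2i]; code 3, IEEE80211_HE_MCS_NOT_SUPPORTED, means none). A standalone sketch with a hypothetical map:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* hypothetical map: streams 1-2 support MCS 0-11, streams 3-8 none */
	uint16_t mcs_80 = 0xfffa;
	int i;

	for (i = 0; i < 8; i++) {
		uint16_t code = (mcs_80 >> (2 * i)) & 0x3;

		/* 0: MCS 0-7, 1: MCS 0-9, 2: MCS 0-11, 3: not supported */
		printf("NSS %d -> code %u\n", i + 1, code);
	}
	return 0;
}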
static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
struct ieee80211_supported_band *sband,
struct iwl_tlc_config_cmd *cmd)
......@@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
unsigned long supp; /* must be unsigned long for for_each_set_bit */
const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap;
/* non HT rates */
supp = 0;
......@@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta,
cmd->non_ht_rates = cpu_to_le16(supp);
cmd->mode = IWL_TLC_MNG_MODE_NON_HT;
if (vht_cap && vht_cap->vht_supported) {
/* HT/VHT rates */
if (he_cap && he_cap->has_he) {
cmd->mode = IWL_TLC_MNG_MODE_HE;
rs_fw_he_set_enabled_rates(sta, he_cap, cmd);
} else if (vht_cap && vht_cap->vht_supported) {
cmd->mode = IWL_TLC_MNG_MODE_VHT;
rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd);
} else if (ht_cap && ht_cap->ht_supported) {
......
......@@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx += 1;
if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE))
return idx;
} else if (rate_n_flags & RATE_MCS_VHT_MSK) {
} else if (rate_n_flags & RATE_MCS_VHT_MSK ||
rate_n_flags & RATE_MCS_HE_MSK) {
idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
idx += IWL_RATE_MCS_0_INDEX;
......@@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags)
idx++;
if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE))
return idx;
if ((rate_n_flags & RATE_MCS_HE_MSK) &&
(idx <= IWL_LAST_HE_RATE))
return idx;
} else {
/* legacy rate format, search for match in table */
......@@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type)
[LQ_HT_MIMO2] = "HT MIMO",
[LQ_VHT_SISO] = "VHT SISO",
[LQ_VHT_MIMO2] = "VHT MIMO",
[LQ_HE_SISO] = "HE SISO",
[LQ_HE_MIMO2] = "HE MIMO",
};
if (type < LQ_NONE || type >= LQ_MAX)
......@@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
/* Legacy */
if (!(ucode_rate & RATE_MCS_HT_MSK) &&
!(ucode_rate & RATE_MCS_VHT_MSK)) {
!(ucode_rate & RATE_MCS_VHT_MSK) &&
!(ucode_rate & RATE_MCS_HE_MSK)) {
if (num_of_ant == 1) {
if (band == NL80211_BAND_5GHZ)
rate->type = LQ_LEGACY_A;
......@@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
return 0;
}
/* HT or VHT */
/* HT, VHT or HE */
if (ucode_rate & RATE_MCS_SGI_MSK)
rate->sgi = true;
if (ucode_rate & RATE_MCS_LDPC_MSK)
......@@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate,
} else {
WARN_ON_ONCE(1);
}
} else if (ucode_rate & RATE_MCS_HE_MSK) {
nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
if (nss == 1) {
rate->type = LQ_HE_SISO;
WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1,
"stbc %d bfer %d", rate->stbc, rate->bfer);
} else if (nss == 2) {
rate->type = LQ_HE_MIMO2;
WARN_ON_ONCE(num_of_ant != 2);
} else {
WARN_ON_ONCE(1);
}
}
WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 &&
!is_vht(rate));
!is_he(rate) && !is_vht(rate));
return 0;
}
......@@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS;
if (!(rate & RATE_MCS_HT_MSK) &&
!(rate & RATE_MCS_VHT_MSK)) {
!(rate & RATE_MCS_VHT_MSK) &&
!(rate & RATE_MCS_HE_MSK)) {
int index = iwl_hwrate_to_plcp_idx(rate);
return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n",
......@@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate)
mcs = rate & RATE_HT_MCS_INDEX_MSK;
nss = ((rate & RATE_HT_MCS_NSS_MSK)
>> RATE_HT_MCS_NSS_POS) + 1;
} else if (rate & RATE_MCS_HE_MSK) {
type = "HE";
mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK;
nss = ((rate & RATE_VHT_MCS_NSS_MSK)
>> RATE_VHT_MCS_NSS_POS) + 1;
} else {
type = "Unknown"; /* shouldn't happen */
}
......@@ -3886,6 +3913,8 @@ static ssize_t rs_sta_dbgfs_drv_tx_stats_read(struct file *file,
[IWL_RATE_MCS_7_INDEX] = "MCS7",
[IWL_RATE_MCS_8_INDEX] = "MCS8",
[IWL_RATE_MCS_9_INDEX] = "MCS9",
[IWL_RATE_MCS_10_INDEX] = "MCS10",
[IWL_RATE_MCS_11_INDEX] = "MCS11",
};
char *buff, *pos, *endpos;
......
......@@ -144,8 +144,13 @@ enum {
#define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63)
#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64)
#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64)
/*
* FIXME - various places in firmware API still use u8,
* e.g. LQ command and SCD config command.
* This should be 256 instead.
*/
#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255)
#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255)
#define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0)
#define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */
......@@ -162,6 +167,8 @@ enum iwl_table_type {
LQ_HT_MIMO2,
LQ_VHT_SISO, /* VHT types */
LQ_VHT_MIMO2,
LQ_HE_SISO, /* HE types */
LQ_HE_MIMO2,
LQ_MAX,
};
......@@ -183,11 +190,16 @@ struct rs_rate {
#define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2)
#define is_type_vht_siso(type) ((type) == LQ_VHT_SISO)
#define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2)
#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type))
#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type))
#define is_type_he_siso(type) ((type) == LQ_HE_SISO)
#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2)
#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \
is_type_he_siso(type))
#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \
is_type_vht_mimo2(type) || is_type_he_mimo2(type))
#define is_type_mimo(type) (is_type_mimo2(type))
#define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type))
#define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type))
#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type))
#define is_type_a_band(type) ((type) == LQ_LEGACY_A)
#define is_type_g_band(type) ((type) == LQ_LEGACY_G)
......@@ -201,6 +213,7 @@ struct rs_rate {
#define is_mimo(rate) is_type_mimo((rate)->type)
#define is_ht(rate) is_type_ht((rate)->type)
#define is_vht(rate) is_type_vht((rate)->type)
#define is_he(rate) is_type_he((rate)->type)
#define is_a_band(rate) is_type_a_band((rate)->type)
#define is_g_band(rate) is_type_g_band((rate)->type)
......
......@@ -8,6 +8,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -30,6 +31,7 @@
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2015 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -196,10 +198,20 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm,
struct sk_buff *skb, int queue,
struct ieee80211_sta *sta)
{
if (iwl_mvm_check_pn(mvm, skb, queue, sta))
struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);
if (iwl_mvm_check_pn(mvm, skb, queue, sta)) {
kfree_skb(skb);
else
} else {
unsigned int radiotap_len = 0;
if (rx_status->flag & RX_FLAG_RADIOTAP_HE)
radiotap_len += sizeof(struct ieee80211_radiotap_he);
if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU)
radiotap_len += sizeof(struct ieee80211_radiotap_he_mu);
__skb_push(skb, radiotap_len);
ieee80211_rx_napi(mvm->hw, sta, skb, napi);
}
}
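The radiotap handling here relies on the RX path having placed the HE radiotap structs immediately before the 802.11 header and then hidden them with __skb_pull(); the __skb_push() above simply re-exposes them before the frame goes to mac80211. A conceptual sketch with a plain buffer standing in for the skb:

#include <stdio.h>
#include <string.h>

int main(void)
{
	char buf[64] = "RADIOTAP|80211-HDR|PAYLOAD";
	size_t radiotap_len = strlen("RADIOTAP|");
	char *data = buf;

	data += radiotap_len;		/* __skb_pull(): hide radiotap */
	printf("processing sees: %s\n", data);

	data -= radiotap_len;		/* __skb_push(): expose it again */
	printf("mac80211 sees:   %s\n", data);
	return 0;
}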
static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm,
......@@ -857,6 +869,12 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
struct ieee80211_sta *sta = NULL;
struct sk_buff *skb;
u8 crypt_len = 0;
struct ieee80211_radiotap_he *he = NULL;
struct ieee80211_radiotap_he_mu *he_mu = NULL;
u32 he_type = 0xffffffff;
/* this is invalid e.g. because puncture type doesn't allow 0b11 */
#define HE_PHY_DATA_INVAL ((u64)-1)
u64 he_phy_data = HE_PHY_DATA_INVAL;
if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)))
return;
......@@ -882,6 +900,46 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status = IEEE80211_SKB_RXCB(skb);
if (rate_n_flags & RATE_MCS_HE_MSK) {
static const struct ieee80211_radiotap_he known = {
.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN |
IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN),
.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN |
IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN),
};
static const struct ieee80211_radiotap_he_mu mu_known = {
.flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN |
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN),
.flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN),
};
unsigned int radiotap_len = 0;
he = skb_put_data(skb, &known, sizeof(known));
radiotap_len += sizeof(known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE;
he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK;
if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) {
he_phy_data =
le64_to_cpu(desc->he_phy_data);
if (he_type == RATE_MCS_HE_TYPE_MU) {
he_mu = skb_put_data(skb, &mu_known,
sizeof(mu_known));
radiotap_len += sizeof(mu_known);
rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU;
}
}
/* temporarily hide the radiotap data */
__skb_pull(skb, radiotap_len);
}
if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc,
le32_to_cpu(pkt->len_n_flags), queue,
&crypt_len)) {
......@@ -907,6 +965,45 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise);
/* TSF as indicated by the firmware is at INA time */
rx_status->flag |= RX_FLAG_MACTIME_PLCP_START;
} else if (he_type == RATE_MCS_HE_TYPE_SU) {
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN);
if (FIELD_GET(IWL_RX_HE_PHY_UPLINK,
le64_to_cpu(desc->he_phy_data)))
he->data3 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL);
if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) {
rx_status->ampdu_reference = mvm->ampdu_ref;
mvm->ampdu_ref++;
rx_status->flag |= RX_FLAG_AMPDU_DETAILS;
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
le64_to_cpu(desc->he_phy_data)))
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT;
}
} else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) {
he_mu->flags1 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS);
he_mu->flags1 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM);
he_mu->flags1 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS);
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP);
he_mu->flags2 |=
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK,
he_phy_data),
IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW);
}
rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise);
rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ :
......@@ -925,6 +1022,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
if (toggle_bit != mvm->ampdu_toggle) {
mvm->ampdu_ref++;
mvm->ampdu_toggle = toggle_bit;
if (he_phy_data != HE_PHY_DATA_INVAL &&
he_type == RATE_MCS_HE_TYPE_MU) {
rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN;
if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF,
le64_to_cpu(desc->he_phy_data)))
rx_status->flag |=
RX_FLAG_AMPDU_EOF_BIT;
}
}
}
......@@ -1033,7 +1139,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
}
}
/* Set up the HT phy flags */
switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) {
case RATE_MCS_CHAN_WIDTH_20:
break;
......@@ -1048,6 +1153,70 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
break;
}
if (he_type == RATE_MCS_HE_TYPE_EXT_SU &&
rate_n_flags & RATE_MCS_HE_106T_MSK) {
rx_status->bw = RATE_INFO_BW_HE_RU;
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
}
if (rate_n_flags & RATE_MCS_HE_MSK &&
phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD &&
he_type == RATE_MCS_HE_TYPE_MU) {
/*
* Unfortunately, we have to leave the mac80211 data
* incorrect for the case that we receive an HE-MU
* transmission and *don't* have the he_mu pointer,
* i.e. we don't have the phy data (due to the bits
* being used for TSF). This shouldn't happen though
* as management frames (where we need the TSF/timers)
* should not be transmitted in HE-MU.
*/
u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data);
u8 offs = 0;
rx_status->bw = RATE_INFO_BW_HE_RU;
switch (ru) {
case 0 ... 36:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26;
offs = ru;
break;
case 37 ... 52:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52;
offs = ru - 37;
break;
case 53 ... 60:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106;
offs = ru - 53;
break;
case 61 ... 64:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242;
offs = ru - 61;
break;
case 65 ... 66:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484;
offs = ru - 65;
break;
case 67:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996;
break;
case 68:
rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996;
break;
}
he->data2 |=
le16_encode_bits(offs,
IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET);
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN);
if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80)
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC);
} else if (he) {
he->data1 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN);
}
if (!(rate_n_flags & RATE_MCS_CCK_MSK) &&
rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
......@@ -1072,6 +1241,112 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi,
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
} else if (he) {
u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >>
RATE_MCS_STBC_POS;
rx_status->nss =
((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >>
RATE_VHT_MCS_NSS_POS) + 1;
rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK;
rx_status->encoding = RX_ENC_HE;
rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT;
if (rate_n_flags & RATE_MCS_BF_MSK)
rx_status->enc_flags |= RX_ENC_FLAG_BF;
rx_status->he_dcm =
!!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK);
#define CHECK_TYPE(F) \
BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \
(RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS))
CHECK_TYPE(SU);
CHECK_TYPE(EXT_SU);
CHECK_TYPE(MU);
CHECK_TYPE(TRIG);
he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS);
if (rate_n_flags & RATE_MCS_BF_MSK)
he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF);
switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >>
RATE_MCS_HE_GI_LTF_POS) {
case 0:
rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
break;
case 1:
rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
break;
case 2:
rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6;
break;
case 3:
if (rate_n_flags & RATE_MCS_SGI_MSK)
rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8;
else
rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2;
break;
}
switch (he_type) {
case RATE_MCS_HE_TYPE_SU: {
u16 val;
/* LTF syms correspond to streams */
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
switch (rx_status->nss) {
case 1:
val = 0;
break;
case 2:
val = 1;
break;
case 3:
case 4:
val = 2;
break;
case 5:
case 6:
val = 3;
break;
case 7:
case 8:
val = 4;
break;
default:
WARN_ONCE(1, "invalid nss: %d\n",
rx_status->nss);
val = 0;
}
he->data5 |=
le16_encode_bits(val,
IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS);
}
break;
case RATE_MCS_HE_TYPE_MU: {
u16 val;
if (he_phy_data == HE_PHY_DATA_INVAL)
break;
val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK,
le64_to_cpu(desc->he_phy_data));
he->data2 |=
cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN);
he->data5 |=
cpu_to_le16(FIELD_PREP(
IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS,
val));
}
break;
case RATE_MCS_HE_TYPE_EXT_SU:
case RATE_MCS_HE_TYPE_TRIG:
/* not supported yet */
break;
}
} else {
int rate = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
rx_status->band);
......
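The RU allocation switch a few hunks up maps the 8-bit index from the PHY data onto RU sizes, with the offset within each index range giving the RU position inside the bandwidth. A table-style sketch of that mapping (mirroring the case ranges, not the driver API):

#include <stdio.h>

static const char *ru_size(unsigned int ru)
{
	if (ru <= 36)
		return "26-tone";
	if (ru <= 52)
		return "52-tone";
	if (ru <= 60)
		return "106-tone";
	if (ru <= 64)
		return "242-tone";
	if (ru <= 66)
		return "484-tone";
	if (ru == 67)
		return "996-tone";
	if (ru == 68)
		return "2x996-tone";
	return "reserved";
}

int main(void)
{
	unsigned int samples[] = { 0, 37, 53, 61, 65, 67, 68 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++)
		printf("ru=%u -> %s\n", samples[i], ru_size(samples[i]));
	return 0;
}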
......@@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
struct iwl_mvm_baid_data *data,
u16 ssn, u8 buf_size)
u16 ssn, u16 buf_size)
{
int i;
......@@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
}
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u8 buf_size, u16 timeout)
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
struct iwl_mvm_add_sta_cmd cmd = {};
......@@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
if (start) {
cmd.add_immediate_ba_tid = (u8) tid;
cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
cmd.rx_ba_window = cpu_to_le16((u16)buf_size);
cmd.rx_ba_window = cpu_to_le16(buf_size);
} else {
cmd.remove_immediate_ba_tid = (u8) tid;
}
......@@ -2559,7 +2559,7 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
}
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u8 buf_size,
struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
......
......@@ -412,7 +412,7 @@ struct iwl_mvm_sta {
u32 tfd_queue_msk;
u32 mac_id_n_color;
u16 tid_disable_agg;
u8 max_agg_bufsize;
u16 max_agg_bufsize;
enum iwl_sta_type sta_type;
enum ieee80211_sta_state sta_state;
bool bt_reduced_txpower;
......@@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
/* AMPDU */
int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
int tid, u16 ssn, bool start, u8 buf_size, u16 timeout);
int tid, u16 ssn, bool start, u16 buf_size, u16 timeout);
int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u16 *ssn);
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid, u8 buf_size,
struct ieee80211_sta *sta, u16 tid, u16 buf_size,
bool amsdu);
int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, u16 tid);
......
......@@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Make sure we zero enough of dev_cmd */
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd));
BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd));
memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd));
dev_cmd->hdr.cmd = TX_CMD;
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
u16 offload_assist = 0;
u32 rate_n_flags = 0;
u16 flags = 0;
if (ieee80211_is_data_qos(hdr->frame_control)) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
......@@ -507,7 +509,20 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
!(offload_assist & BIT(TX_CMD_OFFLD_AMSDU)))
offload_assist |= BIT(TX_CMD_OFFLD_PAD);
cmd->offload_assist |= cpu_to_le16(offload_assist);
if (!info->control.hw_key)
flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
/* For data packets rate info comes from the fw */
if (!(ieee80211_is_data(hdr->frame_control) && sta)) {
flags |= IWL_TX_FLAGS_CMD_RATE;
rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta);
}
if (mvm->trans->cfg->device_family >=
IWL_DEVICE_FAMILY_22560) {
struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
cmd->offload_assist |= cpu_to_le32(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
......@@ -515,17 +530,22 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
/* Copy MAC header from skb into command buffer */
memcpy(cmd->hdr, hdr, hdrlen);
if (!info->control.hw_key)
cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS);
cmd->flags = cpu_to_le16(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
} else {
struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
/* For data packets rate info comes from the fw */
if (ieee80211_is_data(hdr->frame_control) && sta)
goto out;
cmd->offload_assist |= cpu_to_le16(offload_assist);
/* Total # bytes to be transmitted */
cmd->len = cpu_to_le16((u16)skb->len);
cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE);
cmd->rate_n_flags =
cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta));
/* Copy MAC header from skb into command buffer */
memcpy(cmd->hdr, hdr, hdrlen);
cmd->flags = cpu_to_le32(flags);
cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
}
goto out;
}
......
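The restructuring above computes offload_assist, flags and rate_n_flags once and only then serializes them into the per-family command layout; note the gen3 layout takes a 32-bit offload_assist and 16-bit flags while gen2 keeps 16/32. A minimal sketch of the dispatch pattern, with hypothetical stand-in structs:

#include <stdint.h>
#include <stdio.h>

struct tx_gen2 { uint16_t offload_assist; uint32_t flags; };
struct tx_gen3 { uint32_t offload_assist; uint16_t flags; };

static void build_cmd(int is_gen3, uint16_t offload_assist, uint16_t flags,
		      void *payload)
{
	if (is_gen3) {
		struct tx_gen3 *cmd = payload;

		cmd->offload_assist = offload_assist; /* widened to 32 bits */
		cmd->flags = flags;
	} else {
		struct tx_gen2 *cmd = payload;

		cmd->offload_assist = offload_assist;
		cmd->flags = flags;
	}
}

int main(void)
{
	struct tx_gen3 cmd = { 0 };

	build_cmd(1, 0x40, 0x1, &cmd);
	printf("gen3 flags=0x%x\n", cmd.flags);
	return 0;
}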
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* BSD LICENSE
*
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include "iwl-trans.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
#include "iwl-prph.h"
int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans,
const struct fw_img *fw)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_context_info_gen3 *ctxt_info_gen3;
struct iwl_prph_scratch *prph_scratch;
struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl;
struct iwl_prph_info *prph_info;
void *iml_img;
u32 control_flags = 0;
int ret;
/* Allocate prph scratch */
prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch),
&trans_pcie->prph_scratch_dma_addr,
GFP_KERNEL);
if (!prph_scratch)
return -ENOMEM;
prph_sc_ctrl = &prph_scratch->ctrl_cfg;
prph_sc_ctrl->version.version = 0;
prph_sc_ctrl->version.mac_id =
cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV));
prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4);
control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K |
IWL_PRPH_SCRATCH_MTR_MODE |
(IWL_PRPH_MTR_FORMAT_256B &
IWL_PRPH_SCRATCH_MTR_FORMAT) |
IWL_PRPH_SCRATCH_EARLY_DEBUG_EN |
IWL_PRPH_SCRATCH_EDBG_DEST_DRAM;
prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags);
/* initialize RX default queue */
prph_sc_ctrl->rbd_cfg.free_rbd_addr =
cpu_to_le64(trans_pcie->rxq->bd_dma);
/* Configure debug, for integration */
iwl_pcie_alloc_fw_monitor(trans, 0);
prph_sc_ctrl->hwm_cfg.hwm_base_addr =
cpu_to_le64(trans_pcie->fw_mon_phys);
prph_sc_ctrl->hwm_cfg.hwm_size =
cpu_to_le32(trans_pcie->fw_mon_size);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram);
if (ret) {
dma_free_coherent(trans->dev,
sizeof(*prph_scratch),
prph_scratch,
trans_pcie->prph_scratch_dma_addr);
return ret;
}
/*
 * Allocate prph information. Currently we don't assign anything to
 * the prph info; it will be filled in later.
 */
prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info),
&trans_pcie->prph_info_dma_addr,
GFP_KERNEL);
if (!prph_info)
return -ENOMEM;
/* Allocate context info */
ctxt_info_gen3 = dma_alloc_coherent(trans->dev,
sizeof(*ctxt_info_gen3),
&trans_pcie->ctxt_info_dma_addr,
GFP_KERNEL);
if (!ctxt_info_gen3)
return -ENOMEM;
ctxt_info_gen3->prph_info_base_addr =
cpu_to_le64(trans_pcie->prph_info_dma_addr);
ctxt_info_gen3->prph_scratch_base_addr =
cpu_to_le64(trans_pcie->prph_scratch_dma_addr);
ctxt_info_gen3->prph_scratch_size =
cpu_to_le32(sizeof(*prph_scratch));
ctxt_info_gen3->cr_head_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->rb_stts_dma);
ctxt_info_gen3->tr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->tr_tail_dma);
ctxt_info_gen3->cr_tail_idx_arr_base_addr =
cpu_to_le64(trans_pcie->rxq->cr_tail_dma);
ctxt_info_gen3->cr_idx_arr_size =
cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS);
ctxt_info_gen3->tr_idx_arr_size =
cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS);
ctxt_info_gen3->mtr_base_addr =
cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr);
ctxt_info_gen3->mcr_base_addr =
cpu_to_le64(trans_pcie->rxq->used_bd_dma);
ctxt_info_gen3->mtr_size =
cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS));
ctxt_info_gen3->mcr_size =
cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE));
trans_pcie->ctxt_info_gen3 = ctxt_info_gen3;
trans_pcie->prph_info = prph_info;
trans_pcie->prph_scratch = prph_scratch;
/* Allocate IML */
iml_img = dma_alloc_coherent(trans->dev, trans->iml_len,
&trans_pcie->iml_dma_addr, GFP_KERNEL);
if (!iml_img)
return -ENOMEM;
memcpy(iml_img, trans->iml, trans->iml_len);
iwl_enable_interrupts(trans);
/* kick FW self load */
iwl_write64(trans, CSR_CTXT_INFO_ADDR,
trans_pcie->ctxt_info_dma_addr);
iwl_write64(trans, CSR_IML_DATA_ADDR,
trans_pcie->iml_dma_addr);
iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len);
iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA);
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT);
return 0;
}
void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!trans_pcie->ctxt_info_gen3)
return;
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3),
trans_pcie->ctxt_info_gen3,
trans_pcie->ctxt_info_dma_addr);
trans_pcie->ctxt_info_dma_addr = 0;
trans_pcie->ctxt_info_gen3 = NULL;
iwl_pcie_ctxt_info_free_fw_img(trans);
dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch),
trans_pcie->prph_scratch,
trans_pcie->prph_scratch_dma_addr);
trans_pcie->prph_scratch_dma_addr = 0;
trans_pcie->prph_scratch = NULL;
dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info),
trans_pcie->prph_info,
trans_pcie->prph_info_dma_addr);
trans_pcie->prph_info_dma_addr = 0;
trans_pcie->prph_info = NULL;
}
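One thing to note in iwl_pcie_ctxt_info_gen3_init() above: the later -ENOMEM returns (prph_info, ctxt_info_gen3, iml_img) do not unwind the earlier coherent allocations, and iwl_pcie_ctxt_info_gen3_free() bails out before ctxt_info_gen3 is set, so a mid-path failure leaks the prph scratch/info buffers. The usual kernel idiom for this is a goto-unwind ladder; a minimal standard-C sketch (malloc standing in for dma_alloc_coherent):

#include <stdlib.h>

/* each failure path frees everything allocated before it */
static int init_sketch(void **scratch, void **info, void **ctxt)
{
	*scratch = malloc(64);
	if (!*scratch)
		return -1;
	*info = malloc(64);
	if (!*info)
		goto free_scratch;
	*ctxt = malloc(64);
	if (!*ctxt)
		goto free_info;
	return 0;

free_info:
	free(*info);
free_scratch:
	free(*scratch);
	return -1;
}

int main(void)
{
	void *a, *b, *c;

	if (init_sketch(&a, &b, &c))
		return 1;
	free(c);
	free(b);
	free(a);
	return 0;
}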
......@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -55,57 +57,6 @@
#include "internal.h"
#include "iwl-prph.h"
static int iwl_pcie_get_num_sections(const struct fw_img *fw,
int start)
{
int i = 0;
while (start < fw->num_sec &&
fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
start++;
i++;
}
return i;
}
static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
const struct fw_desc *sec,
struct iwl_dram_data *dram)
{
dram->block = dma_alloc_coherent(trans->dev, sec->len,
&dram->physical,
GFP_KERNEL);
if (!dram->block)
return -ENOMEM;
dram->size = sec->len;
memcpy(dram->block, sec->data, sec->len);
return 0;
}
static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
int i;
if (!dram->fw) {
WARN_ON(dram->fw_cnt);
return;
}
for (i = 0; i < dram->fw_cnt; i++)
dma_free_coherent(trans->dev, dram->fw[i].size,
dram->fw[i].block, dram->fw[i].physical);
kfree(dram->fw);
dram->fw_cnt = 0;
dram->fw = NULL;
}
void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
......@@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans)
dram->paging = NULL;
}
static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans,
int iwl_pcie_init_fw_sec(struct iwl_trans *trans,
const struct fw_img *fw,
struct iwl_context_info *ctxt_info)
struct iwl_context_info_dram *ctxt_dram)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram;
int i, ret, lmac_cnt, umac_cnt, paging_cnt;
if (WARN(dram->paging,
......@@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans,
TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS);
/* allocate ucode sections in dram and set addresses */
ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info);
ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram);
if (ret) {
dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info),
ctxt_info, trans_pcie->ctxt_info_dma_addr);
......
......@@ -816,8 +816,6 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)},
{IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)},
{IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)},
#endif /* CONFIG_IWLMVM */
......
......@@ -3,6 +3,7 @@
* Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
* Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
* Copyright(c) 2016 - 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
......@@ -17,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
* this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
......@@ -59,6 +59,7 @@
#define RX_POST_REQ_ALLOC 2
#define RX_CLAIM_REQ_ALLOC 8
#define RX_PENDING_WATERMARK 16
#define FIRST_RX_QUEUE 512
struct iwl_host_cmd;
......@@ -98,6 +99,106 @@ struct isr_statistics {
u32 unhandled;
};
#define IWL_CD_STTS_OPTIMIZED_POS 0
#define IWL_CD_STTS_OPTIMIZED_MSK 0x01
#define IWL_CD_STTS_TRANSFER_STATUS_POS 1
#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E
#define IWL_CD_STTS_WIFI_STATUS_POS 4
#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0
/**
* enum iwl_completion_desc_transfer_status - transfer status (bits 1-3)
* @IWL_CD_STTS_END_TRANSFER: transfer completed successfully.
* In sniffer mode, when split is used, set in the last CD completion. (RX)
* @IWL_CD_STTS_OVERFLOW: in sniffer mode, when split is used, set in
* all CD completions. (RX)
* @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX)
*/
enum iwl_completion_desc_transfer_status {
IWL_CD_STTS_UNUSED,
IWL_CD_STTS_UNUSED_2,
IWL_CD_STTS_END_TRANSFER,
IWL_CD_STTS_OVERFLOW,
IWL_CD_STTS_ABORTED,
IWL_CD_STTS_ERROR,
};
/**
* enum iwl_completion_desc_wifi_status - wifi status (bits 4-7)
* @IWL_CD_STTS_VALID: the packet is valid (RX)
* @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX)
* @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX)
* @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX)
* @IWL_CD_STTS_DUP: duplicate packet (RX)
* @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX)
* @IWL_CD_STTS_INTERNAL_SNAP_ERR: error removing the SNAP header (RX)
* @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX)
* @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX)
* @IWL_CD_STTS_QOS_NULL: QoS null packet (RX)
* @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX)
* @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX)
* @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX)
* @IWL_CD_STTS_NOT_USED: completed but not used (RX)
* @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX)
*/
enum iwl_completion_desc_wifi_status {
IWL_CD_STTS_VALID,
IWL_CD_STTS_FCS_ERR,
IWL_CD_STTS_SEC_KEY_ERR,
IWL_CD_STTS_DECRYPTION_ERR,
IWL_CD_STTS_DUP,
IWL_CD_STTS_ICV_MIC_ERR,
IWL_CD_STTS_INTERNAL_SNAP_ERR,
IWL_CD_STTS_SEC_PORT_FAIL,
IWL_CD_STTS_BA_OLD_SN,
IWL_CD_STTS_QOS_NULL,
IWL_CD_STTS_MAC_HDR_ERR,
IWL_CD_STTS_MAX_RETRANS,
IWL_CD_STTS_EX_LIFETIME,
IWL_CD_STTS_NOT_USED,
IWL_CD_STTS_REPLAY_ERR,
};
#define IWL_RX_TD_TYPE 0xff000000
#define IWL_RX_TD_SIZE 0x00ffffff
/**
* struct iwl_rx_transfer_desc - transfer descriptor
* @type_n_size: buffer type (bit 0: external buff valid,
* bit 1: optional footer valid, bit 2-7: reserved)
* and buffer size
* @addr: ptr to free buffer start address
* @rbid: unique tag of the buffer
* @reserved: reserved
*/
struct iwl_rx_transfer_desc {
__le32 type_n_size;
__le64 addr;
__le16 rbid;
__le16 reserved;
} __packed;
#define IWL_RX_CD_SIZE 0xffffff00
/**
* struct iwl_rx_completion_desc - completion descriptor
* @type: buffer type (bit 0: external buff valid,
* bit 1: optional footer valid, bit 2-7: reserved)
* @status: status of the completion
* @reserved1: reserved
* @rbid: unique tag of the received buffer
* @size: buffer size, masked by IWL_RX_CD_SIZE
* @reserved2: reserved
*/
struct iwl_rx_completion_desc {
u8 type;
u8 status;
__le16 reserved1;
__le16 rbid;
__le32 size;
u8 reserved2[22];
} __packed;
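The status byte of the completion descriptor is decoded with the POS/MSK pairs defined above; a standalone sketch with a hypothetical status value:

#include <stdio.h>
#include <stdint.h>

#define IWL_CD_STTS_OPTIMIZED_POS	0
#define IWL_CD_STTS_OPTIMIZED_MSK	0x01
#define IWL_CD_STTS_TRANSFER_STATUS_POS	1
#define IWL_CD_STTS_TRANSFER_STATUS_MSK	0x0E
#define IWL_CD_STTS_WIFI_STATUS_POS	4
#define IWL_CD_STTS_WIFI_STATUS_MSK	0xF0

int main(void)
{
	uint8_t status = 0x25;	/* hypothetical completion status byte */

	printf("optimized: %u\n",
	       (status & IWL_CD_STTS_OPTIMIZED_MSK) >> IWL_CD_STTS_OPTIMIZED_POS);
	printf("transfer:  %u\n",	/* 2 == IWL_CD_STTS_END_TRANSFER */
	       (status & IWL_CD_STTS_TRANSFER_STATUS_MSK) >>
	       IWL_CD_STTS_TRANSFER_STATUS_POS);
	printf("wifi:      %u\n",	/* 2 == IWL_CD_STTS_SEC_KEY_ERR */
	       (status & IWL_CD_STTS_WIFI_STATUS_MSK) >>
	       IWL_CD_STTS_WIFI_STATUS_POS);
	return 0;
}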
/**
* struct iwl_rxq - Rx queue
* @id: queue index
......@@ -106,6 +207,10 @@ struct isr_statistics {
* @bd_dma: bus address of buffer of receive buffer descriptors (rbd)
* @used_bd: driver's pointer to buffer of used receive buffer descriptors (rbd)
* @used_bd_dma: physical address of buffer of used receive buffer descriptors (rbd)
* @tr_tail: driver's pointer to the transmission ring tail buffer
* @tr_tail_dma: physical address of the buffer for the transmission ring tail
* @cr_tail: driver's pointer to the completion ring tail buffer
* @cr_tail_dma: physical address of the buffer for the completion ring tail
* @read: Shared index to newest available Rx buffer
* @write: Shared index to oldest written Rx packet
* @free_count: Number of pre-allocated buffers in rx_free
......@@ -127,6 +232,10 @@ struct iwl_rxq {
dma_addr_t bd_dma;
__le32 *used_bd;
dma_addr_t used_bd_dma;
__le16 *tr_tail;
dma_addr_t tr_tail_dma;
__le16 *cr_tail;
dma_addr_t cr_tail_dma;
u32 read;
u32 write;
u32 free_count;
......@@ -175,18 +284,18 @@ struct iwl_dma_ptr {
* iwl_queue_inc_wrap - increment queue index, wrap back to beginning
* @index -- current index
*/
static inline int iwl_queue_inc_wrap(int index)
static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index)
{
return ++index & (TFD_QUEUE_SIZE_MAX - 1);
return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
/**
* iwl_queue_dec_wrap - decrement queue index, wrap back to end
* @index -- current index
*/
static inline int iwl_queue_dec_wrap(int index)
static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index)
{
return --index & (TFD_QUEUE_SIZE_MAX - 1);
return --index & (trans->cfg->base_params->max_tfd_queue_size - 1);
}
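The mask-based wrap in these helpers only works because max_tfd_queue_size is a power of two (256 for the existing families, 65536 for 22560, per the cfg changes earlier in this series). A quick standalone check:

#include <stdio.h>

static int inc_wrap(int index, int qsize)
{
	return ++index & (qsize - 1);	/* valid only for power-of-two qsize */
}

int main(void)
{
	printf("%d\n", inc_wrap(255, 256));	/* wraps to 0 */
	printf("%d\n", inc_wrap(65535, 65536));	/* wraps to 0 */
	printf("%d\n", inc_wrap(100, 256));	/* 101 */
	return 0;
}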
struct iwl_cmd_meta {
......@@ -314,6 +423,18 @@ enum iwl_shared_irq_flags {
IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};
/**
* enum iwl_image_response_code - image response values
* @IWL_IMAGE_RESP_DEF: the default value of the register
* @IWL_IMAGE_RESP_SUCCESS: iml was read successfully
* @IWL_IMAGE_RESP_FAIL: iml reading failed
*/
enum iwl_image_response_code {
IWL_IMAGE_RESP_DEF = 0,
IWL_IMAGE_RESP_SUCCESS = 1,
IWL_IMAGE_RESP_FAIL = 2,
};
/**
* struct iwl_dram_data
* @physical: page phy pointer
......@@ -347,6 +468,12 @@ struct iwl_self_init_dram {
* @global_table: table mapping received VID from hw to rxb
* @rba: allocator for RX replenishing
* @ctxt_info: context information for FW self init
* @ctxt_info_gen3: context information for gen3 devices
* @prph_info: prph info for self init
* @prph_scratch: prph scratch for self init
* @ctxt_info_dma_addr: dma addr of context information
* @prph_info_dma_addr: dma addr of prph info
* @prph_scratch_dma_addr: dma addr of prph scratch
* @init_dram: DRAM data of firmware image (including paging).
* Context information addresses will be taken from here.
......@@ -391,8 +518,16 @@ struct iwl_trans_pcie {
struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE];
struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE];
struct iwl_rb_allocator rba;
union {
struct iwl_context_info *ctxt_info;
struct iwl_context_info_gen3 *ctxt_info_gen3;
};
struct iwl_prph_info *prph_info;
struct iwl_prph_scratch *prph_scratch;
dma_addr_t ctxt_info_dma_addr;
dma_addr_t prph_info_dma_addr;
dma_addr_t prph_scratch_dma_addr;
dma_addr_t iml_dma_addr;
struct iwl_self_init_dram init_dram;
struct iwl_trans *trans;
......@@ -588,6 +723,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Disabled interrupts\n");
}
#define IWL_NUM_OF_COMPLETION_RINGS 31
#define IWL_NUM_OF_TRANSFER_RINGS 527
static inline int iwl_pcie_get_num_sections(const struct fw_img *fw,
int start)
{
int i = 0;
while (start < fw->num_sec &&
fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION &&
fw->sec[start].offset != PAGING_SEPARATOR_SECTION) {
start++;
i++;
}
return i;
}
static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans,
const struct fw_desc *sec,
struct iwl_dram_data *dram)
{
dram->block = dma_alloc_coherent(trans->dev, sec->len,
&dram->physical,
GFP_KERNEL);
if (!dram->block)
return -ENOMEM;
dram->size = sec->len;
memcpy(dram->block, sec->data, sec->len);
return 0;
}
static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_self_init_dram *dram = &trans_pcie->init_dram;
int i;
if (!dram->fw) {
WARN_ON(dram->fw_cnt);
return;
}
for (i = 0; i < dram->fw_cnt; i++)
dma_free_coherent(trans->dev, dram->fw[i].size,
dram->fw[i].block, dram->fw[i].physical);
kfree(dram->fw);
dram->fw_cnt = 0;
dram->fw = NULL;
}
static inline void iwl_disable_interrupts(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
......@@ -660,7 +849,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans)
}
}
static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index)
static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
......@@ -730,9 +919,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
int index = iwl_pcie_get_cmd_index(q, i);
int r = iwl_pcie_get_cmd_index(q, q->read_ptr);
int w = iwl_pcie_get_cmd_index(q, q->write_ptr);
return w >= r ?
(index >= r && index < w) :
!(index < r && index >= w);
}
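The rewrite of iwl_queue_used() is needed because gen2 read/write pointers are now free-running over max_tfd_queue_size while the command queue window (n_window) is smaller, so all three values must be reduced into the window before comparing. A standalone sketch with a hypothetical 32-entry window:

#include <stdbool.h>
#include <stdio.h>

#define N_WINDOW 32	/* hypothetical window size (power of two) */

static bool queue_used(unsigned int read_ptr, unsigned int write_ptr,
		       unsigned int i)
{
	unsigned int idx = i & (N_WINDOW - 1);
	unsigned int r = read_ptr & (N_WINDOW - 1);
	unsigned int w = write_ptr & (N_WINDOW - 1);

	return w >= r ? (idx >= r && idx < w) : !(idx < r && idx >= w);
}

int main(void)
{
	/* free-running pointers beyond the 32-entry window */
	printf("%d\n", queue_used(60, 70, 65));	/* 1: in use */
	printf("%d\n", queue_used(60, 70, 75));	/* 0: free */
	return 0;
}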
static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
......@@ -801,7 +994,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans);
void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
bool was_in_rfkill);
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq);
int iwl_queue_space(const struct iwl_txq *q);
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q);
void iwl_pcie_apm_stop_master(struct iwl_trans *trans);
void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie);
int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
......@@ -818,6 +1011,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len);
#endif
/* common functions that are used by gen3 transport */
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power);
/* transport gen 2 exported functions */
int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
const struct fw_img *fw, bool run_in_rfkill);
......
......@@ -18,8 +18,7 @@
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
* this program.
*
* The full GNU General Public License is included in this distribution in the
* file called LICENSE.
......@@ -37,6 +36,7 @@
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"
/******************************************************************************
*
......@@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
*/
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
if (trans->cfg->mq_rx_supported) {
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
/* TODO: remove this for 22560 once fw does it */
iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
} else if (trans->cfg->mq_rx_supported) {
iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
......@@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
}
rxq->write_actual = round_down(rxq->write, 8);
if (trans->cfg->mq_rx_supported)
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
iwl_write32(trans, HBUS_TARG_WRPTR,
(rxq->write_actual |
((FIRST_RX_QUEUE + rxq->id) << 16)));
else if (trans->cfg->mq_rx_supported)
iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
rxq->write_actual);
else
......@@ -608,27 +617,56 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data)
iwl_pcie_rx_allocator(trans_pcie->trans);
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
struct device *dev = trans->dev;
int i;
int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
sizeof(__le32);
if (WARN_ON(trans_pcie->rxq))
return -EINVAL;
if (rxq->bd)
dma_free_coherent(dev, free_size * rxq->queue_size,
rxq->bd, rxq->bd_dma);
rxq->bd_dma = 0;
rxq->bd = NULL;
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL);
if (!trans_pcie->rxq)
return -EINVAL;
if (rxq->rb_stts)
dma_free_coherent(dev, sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
rxq->rb_stts_dma = 0;
rxq->rb_stts = NULL;
spin_lock_init(&rba->lock);
if (rxq->used_bd)
dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
return;
if (rxq->tr_tail)
dma_free_coherent(dev, sizeof(__le16),
rxq->tr_tail, rxq->tr_tail_dma);
rxq->tr_tail_dma = 0;
rxq->tr_tail = NULL;
if (rxq->cr_tail)
dma_free_coherent(dev, sizeof(__le16),
rxq->cr_tail, rxq->cr_tail_dma);
rxq->cr_tail_dma = 0;
rxq->cr_tail = NULL;
}
static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct device *dev = trans->dev;
int i;
int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
sizeof(__le32);
spin_lock_init(&rxq->lock);
if (trans->cfg->mq_rx_supported)
......@@ -656,41 +694,69 @@ static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
goto err;
}
/*Allocate the driver's pointer to receive buffer status */
/* Allocate the driver's pointer to receive buffer status */
rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts),
&rxq->rb_stts_dma,
GFP_KERNEL);
if (!rxq->rb_stts)
goto err;
}
if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
return 0;
/* Allocate the driver's pointer to TR tail */
rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
&rxq->tr_tail_dma,
GFP_KERNEL);
if (!rxq->tr_tail)
goto err;
/* Allocate the driver's pointer to CR tail */
rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16),
&rxq->cr_tail_dma,
GFP_KERNEL);
if (!rxq->cr_tail)
goto err;
return 0;
err:
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
if (rxq->bd)
dma_free_coherent(dev, free_size * rxq->queue_size,
rxq->bd, rxq->bd_dma);
rxq->bd_dma = 0;
rxq->bd = NULL;
if (rxq->rb_stts)
dma_free_coherent(trans->dev,
sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
if (rxq->used_bd)
dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
iwl_pcie_free_rxq_dma(trans, rxq);
}
kfree(trans_pcie->rxq);
return -ENOMEM;
}
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int i, ret;
if (WARN_ON(trans_pcie->rxq))
return -EINVAL;
trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
GFP_KERNEL);
if (!trans_pcie->rxq)
return -EINVAL;
spin_lock_init(&rba->lock);
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
if (ret)
return ret;
}
return 0;
}
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
......@@ -792,6 +858,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
int i;
switch (trans_pcie->rx_buf_size) {
case IWL_AMSDU_2K:
rb_size = RFH_RXF_DMA_RB_SIZE_2K;
break;
case IWL_AMSDU_4K:
rb_size = RFH_RXF_DMA_RB_SIZE_4K;
break;
......@@ -1002,8 +1071,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rb_allocator *rba = &trans_pcie->rba;
int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) :
sizeof(__le32);
int i;
/*
......@@ -1022,27 +1089,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
for (i = 0; i < trans->num_rx_queues; i++) {
struct iwl_rxq *rxq = &trans_pcie->rxq[i];
if (rxq->bd)
dma_free_coherent(trans->dev,
free_size * rxq->queue_size,
rxq->bd, rxq->bd_dma);
rxq->bd_dma = 0;
rxq->bd = NULL;
if (rxq->rb_stts)
dma_free_coherent(trans->dev,
sizeof(struct iwl_rb_status),
rxq->rb_stts, rxq->rb_stts_dma);
else
IWL_DEBUG_INFO(trans,
"Free rxq->rb_stts which is NULL\n");
if (rxq->used_bd)
dma_free_coherent(trans->dev,
sizeof(__le32) * rxq->queue_size,
rxq->used_bd, rxq->used_bd_dma);
rxq->used_bd_dma = 0;
rxq->used_bd = NULL;
iwl_pcie_free_rxq_dma(trans, rxq);
if (rxq->napi.poll)
netif_napi_del(&rxq->napi);
......@@ -1970,7 +2017,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
/* Error detected by uCode */
if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) ||
(inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) {
(inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) ||
(inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) {
IWL_ERR(trans,
"Microcode SW error detected. Restarting 0x%X.\n",
inta_fh);
......@@ -1995,8 +2043,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
}
}
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 &&
inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) {
/* Reflect IML transfer status */
int res = iwl_read32(trans, CSR_IML_RESP_ADDR);
IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res);
if (res == IWL_IMAGE_RESP_FAIL) {
isr_stats->sw++;
iwl_pcie_irq_handle_error(trans);
}
} else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
/* uCode wakes up after power-down sleep */
if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
iwl_pcie_rxq_check_wrptr(trans);
iwl_pcie_txq_check_wrptrs(trans);
......
......@@ -53,6 +53,7 @@
#include "iwl-trans.h"
#include "iwl-prph.h"
#include "iwl-context-info.h"
#include "iwl-context-info-gen3.h"
#include "internal.h"
/*
......@@ -188,6 +189,9 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power)
}
iwl_pcie_ctxt_info_free_paging(trans);
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
iwl_pcie_ctxt_info_gen3_free(trans);
else
iwl_pcie_ctxt_info_free(trans);
/* Make sure (redundant) we've released our request to stay awake */
......@@ -346,6 +350,9 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans,
goto out;
}
if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560)
ret = iwl_pcie_ctxt_info_gen3_init(trans, fw);
else
ret = iwl_pcie_ctxt_info_init(trans, fw);
if (ret)
goto out;
......
......@@ -203,7 +203,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
trans_pcie->fw_mon_size = 0;
}
static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct page *page = NULL;
......@@ -1132,21 +1132,44 @@ static struct iwl_causes_list causes_list[] = {
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
static struct iwl_causes_list causes_list_v2[] = {
{MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0},
{MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1},
{MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3},
{MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5},
{MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10},
{MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11},
{MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15},
{MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16},
{MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17},
{MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18},
{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A},
{MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B},
{MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D},
{MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};
static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
int i;
int i, arr_size =
(trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2);
/*
* Access all non RX causes and map them to the default irq.
* In case we are missing at least one interrupt vector,
* the first interrupt vector will serve non-RX and FBQ causes.
*/
for (i = 0; i < ARRAY_SIZE(causes_list); i++) {
iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val);
iwl_clear_bit(trans, causes_list[i].mask_reg,
causes_list[i].cause_num);
for (i = 0; i < arr_size; i++) {
struct iwl_causes_list *causes =
(trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ?
causes_list : causes_list_v2;
iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
iwl_clear_bit(trans, causes[i].mask_reg,
causes[i].cause_num);
}
}
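The family check inside the loop above never changes between iterations. An equivalent sketch that hoists the table selection out of the loop (illustrative only, same behavior; the _sketch suffix is not in the patch):

	static void iwl_pcie_map_non_rx_causes_sketch(struct iwl_trans *trans)
	{
		struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
		int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
		bool v2 = trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560;
		struct iwl_causes_list *causes = v2 ? causes_list_v2 : causes_list;
		int i, arr_size = v2 ? ARRAY_SIZE(causes_list_v2) :
				       ARRAY_SIZE(causes_list);

		/* map all non-RX causes to the default (first) irq vector */
		for (i = 0; i < arr_size; i++) {
			iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
			iwl_clear_bit(trans, causes[i].mask_reg,
				      causes[i].cause_num);
		}
	}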
......@@ -2236,9 +2259,9 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
jiffies_to_msecs(txq->wd_timeout),
txq->read_ptr, txq->write_ptr,
iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
(TFD_QUEUE_SIZE_MAX - 1),
(trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
(TFD_QUEUE_SIZE_MAX - 1),
(trans->cfg->base_params->max_tfd_queue_size - 1),
iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
}
......@@ -2934,7 +2957,7 @@ static struct iwl_trans_dump_data
struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_fw_error_dump_txcmd *txcmd;
struct iwl_trans_dump_data *dump_data;
u32 len, num_rbs;
u32 len, num_rbs = 0;
u32 monitor_len;
int i, ptr;
bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
......@@ -3057,7 +3080,7 @@ static struct iwl_trans_dump_data
txcmd = (void *)((u8 *)txcmd->data + caplen);
}
ptr = iwl_queue_dec_wrap(ptr);
ptr = iwl_queue_dec_wrap(trans, ptr);
}
spin_unlock_bh(&cmdq->lock);
......@@ -3349,15 +3372,27 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
#if IS_ENABLED(CONFIG_IWLMVM)
trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID);
if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) {
if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) ==
CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) {
u32 hw_status;
hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS);
if (hw_status & UMAG_GEN_HW_IS_FPGA)
trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0;
else
if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP)
/*
* b step fw is the same for physical card and fpga
*/
trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0;
else if ((hw_status & UMAG_GEN_HW_IS_FPGA) &&
CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) {
trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0;
} else {
/*
* a step no FPGA
*/
trans->cfg = &iwl22000_2ac_cfg_hr;
}
}
#endif
iwl_pcie_set_interrupt_capa(pdev, trans);
......
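The HR detection above reduces to a small decision table (summary of the branch guarded by the CSR_HW_RF_ID_TYPE_HR chip-id match; illustrative comment only):

	/*
	 *   RF step | UMAG_GEN_HW_IS_FPGA | cfg selected
	 *   --------+---------------------+-------------------------------
	 *   B       | don't care          | iwl22000_2ax_cfg_qnj_hr_b0
	 *   A       | set                 | iwl22000_2ax_cfg_qnj_hr_a0_f0
	 *   A       | clear               | iwl22000_2ac_cfg_hr
	 */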
......@@ -6,6 +6,7 @@
* GPL LICENSE SUMMARY
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
......@@ -19,6 +20,7 @@
* BSD LICENSE
*
* Copyright(c) 2017 Intel Deutschland GmbH
* Copyright(c) 2018 Intel Corporation
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
......@@ -84,15 +86,19 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans)
/*
* iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array
*/
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie,
struct iwl_txq *txq, u16 byte_cnt,
int num_tbs)
{
struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr;
struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);
struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr;
int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
u8 filled_tfd_size, num_fetch_chunks;
u16 len = byte_cnt;
__le16 bc_ent;
if (trans_pcie->bc_table_dword)
len = DIV_ROUND_UP(len, 4);
if (WARN_ON(len > 0xFFF || idx >= txq->n_window))
......@@ -111,6 +117,9 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt,
num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;
bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560)
scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent;
else
scd_bc_tbl->tfd_offset[idx] = bc_ent;
}
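A worked example of the byte-count entry computed above (numbers are illustrative; filled_tfd_size comes from the elided part of this function):

	/*
	 * byte_cnt = 1200, bc_table_dword set, filled_tfd_size = 200:
	 *
	 *   len              = DIV_ROUND_UP(1200, 4)     = 300
	 *   num_fetch_chunks = DIV_ROUND_UP(200, 64) - 1 = 3
	 *   bc_ent           = 300 | (3 << 12)           = 0x312c
	 *
	 * The low 12 bits carry the length and bits 12-15 the fetch-chunk
	 * count, which is why the WARN_ON above rejects len > 0xFFF.
	 */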
......@@ -393,8 +402,14 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans,
* (This calculation modifies the TX command, so do it before the
* setup of the first TB)
*/
len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) +
ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE;
if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560)
len = sizeof(struct iwl_tx_cmd_gen2);
else
len = sizeof(struct iwl_tx_cmd_gen3);
len += sizeof(struct iwl_cmd_header) +
ieee80211_hdrlen(hdr->frame_control) -
IWL_FIRST_TB_SIZE;
/* do not align A-MSDU to dword as the subframe header aligns it */
if (amsdu)
......@@ -471,9 +486,9 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
struct iwl_device_cmd *dev_cmd, int txq_id)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_tx_cmd_gen2 *tx_cmd = (void *)dev_cmd->payload;
struct iwl_cmd_meta *out_meta;
struct iwl_txq *txq = trans_pcie->txq[txq_id];
u16 cmd_len;
int idx;
void *tfd;
......@@ -488,11 +503,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
if (iwl_queue_space(txq) < txq->high_mark) {
if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) {
struct iwl_tx_cmd_gen3 *tx_cmd_gen3 =
(void *)dev_cmd->payload;
cmd_len = le16_to_cpu(tx_cmd_gen3->len);
} else {
struct iwl_tx_cmd_gen2 *tx_cmd_gen2 =
(void *)dev_cmd->payload;
cmd_len = le16_to_cpu(tx_cmd_gen2->len);
}
if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(txq) < 3)) {
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
......@@ -526,7 +553,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Set up entry for this TFD in Tx byte-count array */
iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len),
iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len,
iwl_pcie_gen2_get_num_tbs(trans, tfd));
/* start timer if queue currently empty */
......@@ -538,7 +565,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
/*
* At this point the frame is "transmitted" successfully
......@@ -650,7 +677,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
memset(tfd, 0, sizeof(*tfd));
if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
......@@ -787,7 +814,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
iwl_trans_ref(trans);
}
/* Increment and update queue's write index */
txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
......@@ -954,7 +981,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_gen2_free_tfd(trans, txq);
txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
......@@ -1062,6 +1089,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
if (!txq)
return -ENOMEM;
ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl,
(trans->cfg->device_family >=
IWL_DEVICE_FAMILY_22560) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl));
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
......@@ -1113,7 +1143,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans,
txq->id = qid;
trans_pcie->txq[qid] = txq;
wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1);
wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1);
/* Place first TFD at index corresponding to start sequence number */
txq->read_ptr = wr_ptr;
......
......@@ -71,27 +71,28 @@
*
***************************************************/
int iwl_queue_space(const struct iwl_txq *q)
int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
unsigned int max;
unsigned int used;
/*
* To avoid ambiguity between empty and completely full queues, there
* should always be less than TFD_QUEUE_SIZE_MAX elements in the queue.
* If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need
* should always be less than max_tfd_queue_size elements in the queue.
* If q->n_window is smaller than max_tfd_queue_size, there is no need
* to reserve any queue entries for this purpose.
*/
if (q->n_window < TFD_QUEUE_SIZE_MAX)
if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
max = q->n_window;
else
max = TFD_QUEUE_SIZE_MAX - 1;
max = trans->cfg->base_params->max_tfd_queue_size - 1;
/*
* TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to
* modulo by TFD_QUEUE_SIZE_MAX and is well defined.
* max_tfd_queue_size is a power of 2, so the following is equivalent to
* modulo by max_tfd_queue_size and is well defined.
*/
used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1);
used = (q->write_ptr - q->read_ptr) &
(trans->cfg->base_params->max_tfd_queue_size - 1);
if (WARN_ON(used > max))
return 0;
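A worked example of the arithmetic above (values are illustrative; the function goes on to return max - used):

	/*
	 * max_tfd_queue_size = 256, n_window = 256,
	 * write_ptr = 10, read_ptr = 250:
	 *
	 *   max  = 256 - 1 = 255         (one slot reserved so that a full
	 *                                 queue is distinguishable from an
	 *                                 empty one)
	 *   used = (10 - 250) & 255 = 16
	 *   free = 255 - 16 = 239
	 *
	 * The AND only works as a modulo because the size is a power of
	 * two -- hence the WARN_ONCE check added to iwl_pcie_txq_init().
	 */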
......@@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX;
size_t tfd_sz = trans_pcie->tfd_size *
trans->cfg->base_params->max_tfd_queue_size;
size_t tb0_buf_sz;
int i;
......@@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
int slots_num, bool cmd_queue)
{
int ret;
u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;
txq->need_update = false;
/* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise
/* max_tfd_queue_size must be power-of-two size, otherwise
* iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1));
if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
"Max tfd queue size must be a power of two, but is %d",
tfd_queue_max_size))
return -EINVAL;
/* Initialize queue's high/low-water marks, and head/tail indexes */
ret = iwl_queue_init(txq, slots_num);
......@@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
iwl_pcie_free_tso_page(trans_pcie, skb);
}
iwl_pcie_txq_free_tfd(trans, txq);
txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr);
txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (txq->read_ptr == txq->write_ptr) {
unsigned long flags;
......@@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
/* De-alloc circular buffer of TFDs */
if (txq->tfds) {
dma_free_coherent(dev,
trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX,
trans_pcie->tfd_size *
trans->cfg->base_params->max_tfd_queue_size,
txq->tfds, txq->dma_addr);
txq->dma_addr = 0;
txq->tfds = NULL;
......@@ -916,8 +923,10 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
int ret;
int txq_id, slots_num;
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;
u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues *
bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
sizeof(struct iwl_gen3_bc_tbl) :
sizeof(struct iwlagn_scd_bc_tbl);
/*It is not allowed to alloc twice, so warn when this happens.
......@@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
}
ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
scd_bc_tbls_size);
bc_tbls_size);
if (ret) {
IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
goto error;
......@@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = trans_pcie->txq[txq_id];
int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1);
int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
int last_to_free;
/* This function is not meant to release cmd queue*/
......@@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
}
if (txq->read_ptr == tfd_num)
if (read_ptr == tfd_num)
goto out;
IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
......@@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
/*Since we free until index _not_ inclusive, the one before index is
* the last we will free. This one must be used */
last_to_free = iwl_queue_dec_wrap(tfd_num);
last_to_free = iwl_queue_dec_wrap(trans, tfd_num);
if (!iwl_queue_used(txq, last_to_free)) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX,
__func__, txq_id, last_to_free,
trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
goto out;
}
......@@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
goto out;
for (;
txq->read_ptr != tfd_num;
txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
struct sk_buff *skb = txq->entries[idx].skb;
read_ptr != tfd_num;
txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
struct sk_buff *skb = txq->entries[read_ptr].skb;
if (WARN_ON_ONCE(!skb))
continue;
......@@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
__skb_queue_tail(skbs, skb);
txq->entries[idx].skb = NULL;
txq->entries[read_ptr].skb = NULL;
if (!trans->cfg->use_tfh)
iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
......@@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
iwl_pcie_txq_progress(txq);
if (iwl_queue_space(txq) > txq->low_mark &&
if (iwl_queue_space(trans, txq) > txq->low_mark &&
test_bit(txq_id, trans_pcie->queue_stopped)) {
struct sk_buff_head overflow_skbs;
......@@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
}
spin_lock_bh(&txq->lock);
if (iwl_queue_space(txq) > txq->low_mark)
if (iwl_queue_space(trans, txq) > txq->low_mark)
iwl_wake_queue(trans, txq);
}
......@@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_txq *txq = trans_pcie->txq[txq_id];
unsigned long flags;
int nfreed = 0;
u16 r;
lockdep_assert_held(&txq->lock);
if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) {
idx = iwl_pcie_get_cmd_index(txq, idx);
r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
(!iwl_queue_used(txq, idx))) {
IWL_ERR(trans,
"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
__func__, txq_id, idx, TFD_QUEUE_SIZE_MAX,
__func__, txq_id, idx,
trans->cfg->base_params->max_tfd_queue_size,
txq->write_ptr, txq->read_ptr);
return;
}
for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx;
txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) {
for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
r = iwl_queue_inc_wrap(trans, r)) {
txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);
if (nfreed++ > 0) {
IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
idx, txq->write_ptr, txq->read_ptr);
idx, txq->write_ptr, r);
iwl_force_nmi(trans);
}
}
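A behavior sketch of the reclaim loop above: the command queue completes in order, so reclaiming index idx should advance read_ptr by exactly one slot (illustrative trace, assuming no wrap):

	/*
	 *   r = 5, idx = 5  ->  idx becomes 6, loop runs once, no warning
	 *   r = 3, idx = 5  ->  idx becomes 6, loop runs three times;
	 *                       the second and third passes log
	 *                       "HCMD skipped" and force an NMI
	 */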
......@@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
spin_lock_bh(&txq->lock);
if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
spin_unlock_bh(&txq->lock);
IWL_ERR(trans, "No space in command queue\n");
......@@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
}
/* Increment and update queue's write index */
txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
......@@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
spin_lock(&txq->lock);
if (iwl_queue_space(txq) < txq->high_mark) {
if (iwl_queue_space(trans, txq) < txq->high_mark) {
iwl_stop_queue(trans, txq);
/* don't put the packet on the ring, if there is no room */
if (unlikely(iwl_queue_space(txq) < 3)) {
if (unlikely(iwl_queue_space(trans, txq) < 3)) {
struct iwl_device_cmd **dev_cmd_ptr;
dev_cmd_ptr = (void *)((u8 *)skb->cb +
......@@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
}
/* Tell device the write index *just past* this latest filled TFD */
txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr);
txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
if (!wait_write_ptr)
iwl_pcie_txq_inc_wr_ptr(trans, txq);
......