Commit aa4a6250 authored by John W. Linville
parents 2df3b0b7 a82dda6c
......@@ -2039,7 +2039,7 @@ static void iwl_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
ieee80211_free_txskb(priv->hw, skb);
}
static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
static bool iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_priv *priv = IWL_OP_MODE_GET_DVM(op_mode);
......@@ -2049,6 +2049,8 @@ static void iwl_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
clear_bit(STATUS_RF_KILL_HW, &priv->status);
wiphy_rfkill_set_hw_state(priv->hw->wiphy, state);
return false;
}
static const struct iwl_op_mode_ops iwl_dvm_ops = {
......
......@@ -134,6 +134,7 @@ const struct iwl_cfg iwl7260_2ac_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
......@@ -145,6 +146,7 @@ const struct iwl_cfg iwl7260_2ac_cfg_high_temp = {
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.high_temp = true,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_2n_cfg = {
......@@ -155,6 +157,7 @@ const struct iwl_cfg iwl7260_2n_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
};
const struct iwl_cfg iwl7260_n_cfg = {
......@@ -165,6 +168,7 @@ const struct iwl_cfg iwl7260_n_cfg = {
.nvm_ver = IWL7260_NVM_VERSION,
.nvm_calib_ver = IWL7260_TX_POWER_VERSION,
.host_interrupt_operation_mode = true,
.lp_xtal_workaround = true,
};
const struct iwl_cfg iwl3160_2ac_cfg = {
......
......@@ -262,6 +262,7 @@ struct iwl_cfg {
bool high_temp;
bool d0i3;
u8 nvm_hw_section_num;
bool lp_xtal_workaround;
const struct iwl_pwr_tx_backoff *pwr_tx_backoffs;
};
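Note: the new lp_xtal_workaround field is only a capability marker; nothing in this hunk consumes it. A minimal sketch of how the transport might gate on it, assuming a PCIe-side helper (iwl_pcie_apm_lp_xtal_enable() is an assumed name, not shown in this diff):
/* sketch: run the low-power crystal workaround only where the cfg asks for it */
static void iwl_pcie_apm_maybe_enable_lp_xtal(struct iwl_trans *trans)
{
	if (trans->cfg->lp_xtal_workaround)
		iwl_pcie_apm_lp_xtal_enable(trans);	/* assumed helper */
}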
......
......@@ -138,6 +138,13 @@
/* Analog phase-lock-loop configuration */
#define CSR_ANA_PLL_CFG (CSR_BASE+0x20c)
/*
* CSR HW resources monitor registers
*/
#define CSR_MONITOR_CFG_REG (CSR_BASE+0x214)
#define CSR_MONITOR_STATUS_REG (CSR_BASE+0x228)
#define CSR_MONITOR_XTAL_RESOURCES (0x00000010)
/*
* CSR Hardware Revision Workaround Register. Indicates hardware rev;
* "step" determines CCK backoff for txpower calculation. Used for 4965 only.
......@@ -173,6 +180,7 @@
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_READY (0x00400000) /* PCI_OWN_SEM */
#define CSR_HW_IF_CONFIG_REG_BIT_NIC_PREPARE_DONE (0x02000000) /* ME_OWN */
#define CSR_HW_IF_CONFIG_REG_PREPARE (0x08000000) /* WAKE_ME */
#define CSR_HW_IF_CONFIG_REG_PERSIST_MODE (0x40000000) /* PERSISTENCE */
#define CSR_INT_PERIODIC_DIS (0x00) /* disable periodic int */
#define CSR_INT_PERIODIC_ENA (0xFF) /* 255*32 usec ~ 8 msec */
......@@ -240,6 +248,7 @@
* 001 -- MAC power-down
* 010 -- PHY (radio) power-down
* 011 -- Error
* 10: XTAL ON request
* 9-6: SYS_CONFIG
* Indicates current system configuration, reflecting pins on chip
* as forced high/low by device circuit board.
......@@ -271,6 +280,7 @@
#define CSR_GP_CNTRL_REG_FLAG_INIT_DONE (0x00000004)
#define CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ (0x00000008)
#define CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP (0x00000010)
#define CSR_GP_CNTRL_REG_FLAG_XTAL_ON (0x00000400)
#define CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN (0x00000001)
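The new XTAL_ON flag matches the bit-10 "XTAL ON request" description added above. A hedged sketch of requesting and releasing the crystal via the existing CSR bit helpers:
/* sketch: request XTAL while the MAC stays powered down, then release it */
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
/* ... access shared registers ... */
iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_XTAL_ON);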
......@@ -395,6 +405,34 @@
#define CSR_DRAM_INT_TBL_ENABLE (1 << 31)
#define CSR_DRAM_INIT_TBL_WRAP_CHECK (1 << 27)
/*
* SHR target access (Shared block memory space)
*
* Shared internal registers can be accessed directly from PCI bus through SHR
* arbiter without need for the MAC HW to be powered up. This is possible due to
* indirect read/write via HEEP_CTRL_WRD_PCIEX_CTRL (0xEC) and
* HEEP_CTRL_WRD_PCIEX_DATA (0xF4) registers.
*
* Use the iwl_write32()/iwl_read32() family to access these registers. The MAC HW
* need not be powered up, so no "grab NIC access" is required.
*/
/*
* Registers for accessing shared registers (e.g. SHR_APMG_GP1,
* SHR_APMG_XTAL_CFG). For example, to read from SHR_APMG_GP1 register (0x1DC),
* first, write to the control register:
* HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
* HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 2 (read access)
* second, read from the data register HEEP_CTRL_WRD_PCIEX_DATA[31:0].
*
* To write the register, first, write to the data register
* HEEP_CTRL_WRD_PCIEX_DATA[31:0] and then:
* HEEP_CTRL_WRD_PCIEX_CTRL[15:0] = 0x1DC (offset of the SHR_APMG_GP1 register)
* HEEP_CTRL_WRD_PCIEX_CTRL[29:28] = 3 (write access)
*/
#define HEEP_CTRL_WRD_PCIEX_CTRL_REG (CSR_BASE+0x0ec)
#define HEEP_CTRL_WRD_PCIEX_DATA_REG (CSR_BASE+0x0f4)
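The two comments above fully specify the access protocol; as a cross-check, here is a minimal sketch of helpers that follow it (the helper names are assumptions; only iwl_write32()/iwl_read32() from the comment are given):
/* sketch: SHR read - program the control word, then fetch the data word */
static u32 iwl_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    (reg & 0x0000ffff) | (2 << 28));	/* 2 = read access */
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

/* sketch: SHR write - data word first, then the control word */
static void iwl_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    (reg & 0x0000ffff) | (3 << 28));	/* 3 = write access */
}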
/*
* HBUS (Host-side Bus)
*
......
......@@ -125,6 +125,22 @@ enum iwl_ucode_tlv_flag {
IWL_UCODE_TLV_FLAGS_GO_UAPSD = BIT(30),
};
/**
* enum iwl_ucode_tlv_api - ucode api
* @IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID: wowlan config includes tid field.
*/
enum iwl_ucode_tlv_api {
IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID = BIT(0),
};
/**
* enum iwl_ucode_tlv_capa - ucode capabilities
* @IWL_UCODE_TLV_CAPA_D0I3_SUPPORT: supports D0i3
*/
enum iwl_ucode_tlv_capa {
IWL_UCODE_TLV_CAPA_D0I3_SUPPORT = BIT(0),
};
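Both enums are bitmasks tested against the u32 arrays in ucode_capa, as the mvm hunks later in this series do; the pattern, for reference:
/* sketch: testing the new TLV bits (same pattern the mvm hunks below use) */
bool has_tid_cfg = mvm->fw->ucode_capa.api[0] &
		   IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID;
bool has_d0i3 = mvm->fw->ucode_capa.capa[0] &
		IWL_UCODE_TLV_CAPA_D0I3_SUPPORT;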
/* The default calibrate table size if not specified by firmware file */
#define IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE 18
#define IWL_MAX_STANDARD_PHY_CALIBRATE_TBL_SIZE 19
......
......@@ -93,14 +93,14 @@ int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
}
IWL_EXPORT_SYMBOL(iwl_poll_direct_bit);
static inline u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs)
{
u32 val = iwl_trans_read_prph(trans, ofs);
trace_iwlwifi_dev_ioread_prph32(trans->dev, ofs, val);
return val;
}
static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
{
trace_iwlwifi_dev_iowrite_prph32(trans->dev, ofs, val);
iwl_trans_write_prph(trans, ofs, val);
......
......@@ -70,7 +70,9 @@ u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
u32 __iwl_read_prph(struct iwl_trans *trans, u32 ofs);
u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs);
void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val);
int iwl_poll_prph_bit(struct iwl_trans *trans, u32 addr,
u32 bits, u32 mask, int timeout);
......
......@@ -299,9 +299,11 @@ static int iwl_init_channel_map(struct device *dev, const struct iwl_cfg *cfg,
static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
struct iwl_nvm_data *data,
struct ieee80211_sta_vht_cap *vht_cap)
struct ieee80211_sta_vht_cap *vht_cap,
u8 tx_chains, u8 rx_chains)
{
int num_ants = num_of_ant(data->valid_rx_ant);
int num_rx_ants = num_of_ant(rx_chains);
int num_tx_ants = num_of_ant(tx_chains);
vht_cap->vht_supported = true;
......@@ -311,8 +313,10 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
3 << IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT |
7 << IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
if (num_ants > 1)
if (num_tx_ants > 1)
vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
else
vht_cap->cap |= IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
if (iwlwifi_mod_params.amsdu_size_8K)
vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991;
......@@ -327,10 +331,8 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
IEEE80211_VHT_MCS_NOT_SUPPORTED << 12 |
IEEE80211_VHT_MCS_NOT_SUPPORTED << 14);
if (num_ants == 1 ||
cfg->rx_with_siso_diversity) {
vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN |
IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN;
if (num_rx_ants == 1 || cfg->rx_with_siso_diversity) {
vht_cap->cap |= IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN;
/* this works because NOT_SUPPORTED == 3 */
vht_cap->vht_mcs.rx_mcs_map |=
cpu_to_le16(IEEE80211_VHT_MCS_NOT_SUPPORTED << 2);
......@@ -375,7 +377,8 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg,
iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, IEEE80211_BAND_5GHZ,
tx_chains, rx_chains);
if (enable_vht)
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap);
iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap,
tx_chains, rx_chains);
if (n_channels != n_used)
IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n",
......
......@@ -119,7 +119,8 @@ struct iwl_cfg;
* @queue_not_full: notifies that a HW queue is not full any more.
* Must be atomic and called with BH disabled.
* @hw_rf_kill: notifies of a change in the HW rf kill switch. True means that
* the radio is killed. May sleep.
* the radio is killed. Return %true if the device should be stopped by
* the transport immediately after the call. May sleep.
* @free_skb: allows the transport layer to free skbs that haven't been
* reclaimed by the op_mode. This can happen when the driver is freed and
* there are Tx packets pending in the transport layer.
......@@ -144,7 +145,7 @@ struct iwl_op_mode_ops {
struct iwl_device_cmd *cmd);
void (*queue_full)(struct iwl_op_mode *op_mode, int queue);
void (*queue_not_full)(struct iwl_op_mode *op_mode, int queue);
void (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
bool (*hw_rf_kill)(struct iwl_op_mode *op_mode, bool state);
void (*free_skb)(struct iwl_op_mode *op_mode, struct sk_buff *skb);
void (*nic_error)(struct iwl_op_mode *op_mode);
void (*cmd_queue_full)(struct iwl_op_mode *op_mode);
......@@ -195,11 +196,11 @@ static inline void iwl_op_mode_queue_not_full(struct iwl_op_mode *op_mode,
op_mode->ops->queue_not_full(op_mode, queue);
}
static inline void iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode,
bool state)
static inline bool __must_check
iwl_op_mode_hw_rf_kill(struct iwl_op_mode *op_mode, bool state)
{
might_sleep();
op_mode->ops->hw_rf_kill(op_mode, state);
return op_mode->ops->hw_rf_kill(op_mode, state);
}
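With the bool return, the transport must now act on the op_mode's verdict instead of deciding on its own. A hedged sketch of the calling side (the rfkill-query helper name is an assumption):
/* sketch: transport reacting to the op_mode's rfkill verdict */
bool hw_rfkill = iwl_is_rfkill_set(trans);	/* assumed helper */

if (iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill))
	iwl_trans_stop_device(trans);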
static inline void iwl_op_mode_free_skb(struct iwl_op_mode *op_mode,
......
......@@ -95,7 +95,8 @@
#define APMG_SVR_VOLTAGE_CONFIG_BIT_MSK (0x000001E0) /* bit 8:5 */
#define APMG_SVR_DIGITAL_VOLTAGE_1_32 (0x00000060)
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
#define APMG_PCIDEV_STT_VAL_PERSIST_DIS (0x00000200)
#define APMG_PCIDEV_STT_VAL_L1_ACT_DIS (0x00000800)
#define APMG_RTC_INT_STT_RFKILL (0x10000000)
......@@ -105,6 +106,26 @@
/* Device NMI register */
#define DEVICE_SET_NMI_REG 0x00a01c30
/* Shared registers (0x0..0x3ff, via target indirect or periphery) */
#define SHR_BASE 0x00a10000
/* Shared GP1 register */
#define SHR_APMG_GP1_REG 0x01dc
#define SHR_APMG_GP1_REG_PRPH (SHR_BASE + SHR_APMG_GP1_REG)
#define SHR_APMG_GP1_WF_XTAL_LP_EN 0x00000004
#define SHR_APMG_GP1_CHICKEN_BIT_SELECT 0x80000000
/* Shared DL_CFG register */
#define SHR_APMG_DL_CFG_REG 0x01c4
#define SHR_APMG_DL_CFG_REG_PRPH (SHR_BASE + SHR_APMG_DL_CFG_REG)
#define SHR_APMG_DL_CFG_RTCS_CLK_SELECTOR_MSK 0x000000c0
#define SHR_APMG_DL_CFG_RTCS_CLK_INTERNAL_XTAL 0x00000080
#define SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP 0x00000100
/* Shared APMG_XTAL_CFG register */
#define SHR_APMG_XTAL_CFG_REG 0x01c0
#define SHR_APMG_XTAL_CFG_XTAL_ON_REQ 0x80000000
/*
* Device reset for family 8000
* write to bit 24 in order to reset the CPU
......
......@@ -2,8 +2,8 @@ obj-$(CONFIG_IWLMVM) += iwlmvm.o
iwlmvm-y += fw.o mac80211.o nvm.o ops.o phy-ctxt.o mac-ctxt.o
iwlmvm-y += utils.o rx.o tx.o binding.o quota.o sta.o sf.o
iwlmvm-y += scan.o time-event.o rs.o
iwlmvm-y += power.o bt-coex.o
iwlmvm-y += led.o tt.o
iwlmvm-y += power.o coex.o
iwlmvm-y += led.o tt.o offloading.o
iwlmvm-$(CONFIG_IWLWIFI_DEBUGFS) += debugfs.o debugfs-vif.o
iwlmvm-$(CONFIG_PM_SLEEP) += d3.o
......
......@@ -61,9 +61,11 @@
*
*****************************************************************************/
#include <linux/ieee80211.h>
#include <linux/etherdevice.h>
#include <net/mac80211.h>
#include "fw-api-bt-coex.h"
#include "fw-api-coex.h"
#include "iwl-modparams.h"
#include "mvm.h"
#include "iwl-debug.h"
......@@ -305,6 +307,215 @@ static const __le32 iwl_bt_mprio_lut[BT_COEX_MULTI_PRIO_LUT_SIZE] = {
cpu_to_le32(0x33113311),
};
struct corunning_block_luts {
u8 range;
__le32 lut20[BT_COEX_CORUN_LUT_SIZE];
};
/*
* Ranges for the antenna coupling calibration / co-running block LUT:
* LUT0: [ 0, 12[
* LUT1: [12, 20[
* LUT2: [20, 21[
* LUT3: [21, 23[
* LUT4: [23, 27[
* LUT5: [27, 30[
* LUT6: [30, 32[
* LUT7: [32, 33[
* LUT8: [33, - [
*/
static const struct corunning_block_luts antenna_coupling_ranges[] = {
{
.range = 0,
.lut20 = {
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 12,
.lut20 = {
cpu_to_le32(0x00000001), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 20,
.lut20 = {
cpu_to_le32(0x00000002), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 21,
.lut20 = {
cpu_to_le32(0x00000003), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 23,
.lut20 = {
cpu_to_le32(0x00000004), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 27,
.lut20 = {
cpu_to_le32(0x00000005), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 30,
.lut20 = {
cpu_to_le32(0x00000006), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 32,
.lut20 = {
cpu_to_le32(0x00000007), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
{
.range = 33,
.lut20 = {
cpu_to_le32(0x00000008), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
cpu_to_le32(0x00000000), cpu_to_le32(0x00000000),
},
},
};
static enum iwl_bt_coex_lut_type
iwl_get_coex_type(struct iwl_mvm *mvm, const struct ieee80211_vif *vif)
{
......@@ -390,8 +601,6 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
BT_VALID_LUT |
BT_VALID_WIFI_RX_SW_PRIO_BOOST |
BT_VALID_WIFI_TX_SW_PRIO_BOOST |
BT_VALID_CORUN_LUT_20 |
BT_VALID_CORUN_LUT_40 |
BT_VALID_ANT_ISOLATION |
BT_VALID_ANT_ISOLATION_THRS |
BT_VALID_TXTX_DELTA_FREQ_THRS |
......@@ -401,6 +610,17 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
if (IWL_MVM_BT_COEX_SYNC2SCO)
bt_cmd->flags |= cpu_to_le32(BT_COEX_SYNC2SCO);
if (IWL_MVM_BT_COEX_CORUNNING) {
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_CORUN_LUT_20 |
BT_VALID_CORUN_LUT_40);
bt_cmd->flags |= cpu_to_le32(BT_COEX_CORUNNING);
}
if (IWL_MVM_BT_COEX_MPLUT) {
bt_cmd->flags |= cpu_to_le32(BT_COEX_MPLUT);
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_MULTI_PRIO_LUT);
}
if (mvm->cfg->bt_shared_single_ant)
memcpy(&bt_cmd->decision_lut, iwl_single_shared_ant,
sizeof(iwl_single_shared_ant));
......@@ -408,6 +628,12 @@ int iwl_send_bt_init_conf(struct iwl_mvm *mvm)
memcpy(&bt_cmd->decision_lut, iwl_combined_lookup,
sizeof(iwl_combined_lookup));
/* Take first Co-running block LUT to get started */
memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[0].lut20,
sizeof(bt_cmd->bt4_corun_lut20));
memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[0].lut20,
sizeof(bt_cmd->bt4_corun_lut40));
memcpy(&bt_cmd->bt_prio_boost, iwl_bt_prio_boost,
sizeof(iwl_bt_prio_boost));
memcpy(&bt_cmd->bt4_multiprio_lut, iwl_bt_mprio_lut,
......@@ -498,7 +724,7 @@ int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable)
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_DUP, },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
.flags = CMD_ASYNC,
};
struct iwl_mvm_sta *mvmsta;
......@@ -952,8 +1178,8 @@ void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
#define LINK_QUAL_AGG_TIME_LIMIT_DEF (4000)
#define LINK_QUAL_AGG_TIME_LIMIT_BT_ACT (1200)
u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta)
{
struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
enum iwl_bt_coex_lut_type lut_type;
......@@ -989,6 +1215,38 @@ bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
return iwl_get_coex_type(mvm, mvmsta->vif) == BT_COEX_TIGHT_LUT;
}
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac)
{
__le16 fc = hdr->frame_control;
if (info->band != IEEE80211_BAND_2GHZ)
return 0;
if (unlikely(mvm->bt_tx_prio))
return mvm->bt_tx_prio - 1;
/* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
is_multicast_ether_addr(hdr->addr1) ||
ieee80211_is_ctl(fc) || ieee80211_is_mgmt(fc) ||
ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc))
return 3;
switch (ac) {
case IEEE80211_AC_BE:
return 1;
case IEEE80211_AC_VO:
return 3;
case IEEE80211_AC_VI:
return 2;
default:
break;
}
return 0;
}
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
{
if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_NEWBT_COEX))
......@@ -996,3 +1254,69 @@ void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm)
iwl_mvm_bt_coex_notif_handle(mvm);
}
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *dev_cmd)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u32 ant_isolation = le32_to_cpup((void *)pkt->data);
u8 __maybe_unused lower_bound, upper_bound;
u8 lut;
struct iwl_bt_coex_cmd *bt_cmd;
struct iwl_host_cmd cmd = {
.id = BT_CONFIG,
.len = { sizeof(*bt_cmd), },
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
.flags = CMD_SYNC,
};
if (!IWL_MVM_BT_COEX_CORUNNING)
return 0;
lockdep_assert_held(&mvm->mutex);
if (ant_isolation == mvm->last_ant_isol)
return 0;
for (lut = 0; lut < ARRAY_SIZE(antenna_coupling_ranges) - 1; lut++)
if (ant_isolation < antenna_coupling_ranges[lut + 1].range)
break;
lower_bound = antenna_coupling_ranges[lut].range;
if (lut < ARRAY_SIZE(antenna_coupling_ranges) - 1)
upper_bound = antenna_coupling_ranges[lut + 1].range;
else
upper_bound = antenna_coupling_ranges[lut].range;
IWL_DEBUG_COEX(mvm, "Antenna isolation=%d in range [%d,%d[, lut=%d\n",
ant_isolation, lower_bound, upper_bound, lut);
mvm->last_ant_isol = ant_isolation;
if (mvm->last_corun_lut == lut)
return 0;
mvm->last_corun_lut = lut;
bt_cmd = kzalloc(sizeof(*bt_cmd), GFP_KERNEL);
if (!bt_cmd)
return 0;
cmd.data[0] = bt_cmd;
bt_cmd->flags = cpu_to_le32(BT_COEX_NW);
bt_cmd->valid_bit_msk |= cpu_to_le32(BT_VALID_ENABLE |
BT_VALID_CORUN_LUT_20 |
BT_VALID_CORUN_LUT_40);
/* For the moment, use the same LUT for 20MHz and 40MHz */
memcpy(bt_cmd->bt4_corun_lut20, antenna_coupling_ranges[lut].lut20,
sizeof(bt_cmd->bt4_corun_lut20));
memcpy(bt_cmd->bt4_corun_lut40, antenna_coupling_ranges[lut].lut20,
sizeof(bt_cmd->bt4_corun_lut40));
/* send the updated co-running LUT to the firmware; with CMD_SYNC and
 * IWL_HCMD_DFL_NOCOPY the buffer can be freed once the call returns */
if (iwl_mvm_send_cmd(mvm, &cmd))
	IWL_ERR(mvm, "Failed to send BT_CONFIG command\n");

kfree(bt_cmd);

return 0;
}
......@@ -79,8 +79,8 @@
#define IWL_MVM_PS_SNOOZE_WINDOW 50
#define IWL_MVM_WOWLAN_PS_SNOOZE_WINDOW 25
#define IWL_MVM_LOWLAT_QUOTA_MIN_PERCENT 64
#define IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR 24 /* TU */
#define IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR 24 /* TU */
#define IWL_MVM_BT_COEX_SYNC2SCO 1
#define IWL_MVM_BT_COEX_CORUNNING 1
#define IWL_MVM_BT_COEX_MPLUT 1
#endif /* __MVM_CONSTANTS_H */
......@@ -376,139 +376,6 @@ static int iwl_mvm_send_patterns(struct iwl_mvm *mvm,
return err;
}
static int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif)
{
union {
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
struct iwl_proto_offload_cmd_v3_small v3s;
struct iwl_proto_offload_cmd_v3_large v3l;
} cmd = {};
struct iwl_host_cmd hcmd = {
.id = PROT_OFFLOAD_CONFIG_CMD,
.flags = CMD_SYNC,
.data[0] = &cmd,
.dataflags[0] = IWL_HCMD_DFL_DUP,
};
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
struct iwl_ns_config *nsc;
struct iwl_targ_addr *addrs;
int n_nsc, n_addrs;
int c;
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
nsc = cmd.v3s.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
addrs = cmd.v3s.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
} else {
nsc = cmd.v3l.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
addrs = cmd.v3l.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
}
if (mvmvif->num_target_ipv6_addrs)
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
/*
* For each address we have (and that will fit) fill a target
* address struct and combine for NS offload structs with the
* solicited node addresses.
*/
for (i = 0, c = 0;
i < mvmvif->num_target_ipv6_addrs &&
i < n_addrs && c < n_nsc; i++) {
struct in6_addr solicited_addr;
int j;
addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
&solicited_addr);
for (j = 0; j < c; j++)
if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
&solicited_addr) == 0)
break;
if (j == c)
c++;
addrs[i].addr = mvmvif->target_ipv6_addrs[i];
addrs[i].config_num = cpu_to_le32(j);
nsc[j].dest_ipv6_addr = solicited_addr;
memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
}
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
else
cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
}
BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
sizeof(mvmvif->target_ipv6_addrs[0]));
for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
memcpy(cmd.v2.target_ipv6_addr[i],
&mvmvif->target_ipv6_addrs[i],
sizeof(cmd.v2.target_ipv6_addr[i]));
} else {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
}
BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
sizeof(mvmvif->target_ipv6_addrs[0]));
for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
memcpy(cmd.v1.target_ipv6_addr[i],
&mvmvif->target_ipv6_addrs[i],
sizeof(cmd.v1.target_ipv6_addr[i]));
}
#endif
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
common = &cmd.v3s.common;
size = sizeof(cmd.v3s);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
common = &cmd.v3l.common;
size = sizeof(cmd.v3l);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);
} else {
common = &cmd.v1.common;
size = sizeof(cmd.v1);
}
if (vif->bss_conf.arp_addr_cnt) {
enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
}
if (!enabled)
return 0;
common->enabled = cpu_to_le32(enabled);
hcmd.len[0] = size;
return iwl_mvm_send_cmd(mvm, &hcmd);
}
enum iwl_mvm_tcp_packet_type {
MVM_TCP_TX_SYN,
MVM_TCP_RX_SYNACK,
......@@ -846,8 +713,8 @@ static int iwl_mvm_d3_reprogram(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
quota_cmd.quotas[0].id_and_color =
cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->phy_ctxt->id,
mvmvif->phy_ctxt->color));
quota_cmd.quotas[0].quota = cpu_to_le32(100);
quota_cmd.quotas[0].max_duration = cpu_to_le32(1000);
quota_cmd.quotas[0].quota = cpu_to_le32(IWL_MVM_MAX_QUOTA);
quota_cmd.quotas[0].max_duration = cpu_to_le32(IWL_MVM_MAX_QUOTA);
for (i = 1; i < MAX_BINDINGS; i++)
quota_cmd.quotas[i].id_and_color = cpu_to_le32(FW_CTXT_INVALID);
......@@ -927,6 +794,20 @@ void iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
IWL_ERR(mvm, "failed to set non-QoS seqno\n");
}
static int
iwl_mvm_send_wowlan_config_cmd(struct iwl_mvm *mvm,
const struct iwl_wowlan_config_cmd_v3 *cmd)
{
/* start only with the v2 part of the command */
u16 cmd_len = sizeof(cmd->common);
if (mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_WOWLAN_CONFIG_TID)
cmd_len = sizeof(*cmd);
return iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, CMD_SYNC,
cmd_len, cmd);
}
static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct cfg80211_wowlan *wowlan,
bool test)
......@@ -939,7 +820,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
struct iwl_mvm_vif *mvmvif;
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
struct iwl_wowlan_config_cmd wowlan_config_cmd = {};
struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {};
struct iwl_wowlan_kek_kck_material_cmd kek_kck_cmd = {};
struct iwl_wowlan_tkip_params_cmd tkip_cmd = {};
struct iwl_d3_manager_config d3_cfg_cmd_data = {
......@@ -961,7 +842,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
.tkip = &tkip_cmd,
.use_tkip = false,
};
int ret, i;
int ret;
int len __maybe_unused;
if (!wowlan) {
......@@ -1002,49 +883,41 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
mvm_ap_sta = (struct iwl_mvm_sta *)ap_sta->drv_priv;
/* TODO: wowlan_config_cmd.wowlan_ba_teardown_tids */
/* TODO: wowlan_config_cmd.common.wowlan_ba_teardown_tids */
wowlan_config_cmd.is_11n_connection = ap_sta->ht_cap.ht_supported;
wowlan_config_cmd.common.is_11n_connection =
ap_sta->ht_cap.ht_supported;
/* Query the last used seqno and set it */
ret = iwl_mvm_get_last_nonqos_seq(mvm, vif);
if (ret < 0)
goto out_noreset;
wowlan_config_cmd.non_qos_seq = cpu_to_le16(ret);
wowlan_config_cmd.common.non_qos_seq = cpu_to_le16(ret);
/*
* For QoS counters, we store the one to use next, so subtract 0x10
* since the uCode will add 0x10 *before* using the value while we
* increment after using the value (i.e. store the next value to use).
*/
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = mvm_ap_sta->tid_data[i].seq_number;
seq -= 0x10;
wowlan_config_cmd.qos_seq[i] = cpu_to_le16(seq);
}
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &wowlan_config_cmd.common);
if (wowlan->disconnect)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE);
if (wowlan->magic_pkt)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_MAGIC_PACKET);
if (wowlan->gtk_rekey_failure)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_GTK_REKEY_FAIL);
if (wowlan->eap_identity_req)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_EAP_IDENT_REQ);
if (wowlan->four_way_handshake)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_4WAY_HANDSHAKE);
if (wowlan->n_patterns)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_PATTERN_MATCH);
if (wowlan->rfkill_release)
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_RF_KILL_DEASSERT);
if (wowlan->tcp) {
......@@ -1052,7 +925,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
* Set the "link change" (really "link lost") flag as well
* since that implies losing the TCP connection.
*/
wowlan_config_cmd.wakeup_filter |=
wowlan_config_cmd.common.wakeup_filter |=
cpu_to_le32(IWL_WOWLAN_WAKEUP_REMOTE_LINK_LOSS |
IWL_WOWLAN_WAKEUP_REMOTE_SIGNATURE_TABLE |
IWL_WOWLAN_WAKEUP_REMOTE_WAKEUP_PACKET |
......@@ -1150,9 +1023,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
}
}
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION,
CMD_SYNC, sizeof(wowlan_config_cmd),
&wowlan_config_cmd);
ret = iwl_mvm_send_wowlan_config_cmd(mvm, &wowlan_config_cmd);
if (ret)
goto out;
......@@ -1160,7 +1031,7 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw,
if (ret)
goto out;
ret = iwl_mvm_send_proto_offload(mvm, vif);
ret = iwl_mvm_send_proto_offload(mvm, vif, false, CMD_SYNC);
if (ret)
goto out;
......
......@@ -312,6 +312,11 @@ static ssize_t iwl_dbgfs_reduced_txp_write(struct ieee80211_vif *vif,
mutex_lock(&mvm->mutex);
mvmsta = iwl_mvm_sta_from_staid_protected(mvm, mvmvif->ap_sta_id);
if (IS_ERR_OR_NULL(mvmsta)) {
mutex_unlock(&mvm->mutex);
return -ENOTCONN;
}
mvmsta->bt_reduced_txpower_dbg = false;
ret = iwl_mvm_bt_coex_reduced_txp(mvm, mvmvif->ap_sta_id,
reduced_tx_power);
......
......@@ -65,6 +65,7 @@
#include "iwl-io.h"
#include "iwl-prph.h"
#include "debugfs.h"
#include "fw-error-dump.h"
static ssize_t iwl_dbgfs_tx_flush_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
......@@ -117,6 +118,51 @@ static ssize_t iwl_dbgfs_sta_drain_write(struct iwl_mvm *mvm, char *buf,
return ret;
}
static int iwl_dbgfs_fw_error_dump_open(struct inode *inode, struct file *file)
{
struct iwl_mvm *mvm = inode->i_private;
int ret;
if (!mvm)
return -EINVAL;
mutex_lock(&mvm->mutex);
if (!mvm->fw_error_dump) {
ret = -ENODATA;
goto out;
}
file->private_data = mvm->fw_error_dump;
mvm->fw_error_dump = NULL;
kfree(mvm->fw_error_sram);
mvm->fw_error_sram = NULL;
mvm->fw_error_sram_len = 0;
ret = 0;
out:
mutex_unlock(&mvm->mutex);
return ret;
}
static ssize_t iwl_dbgfs_fw_error_dump_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct iwl_fw_error_dump_file *dump_file = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos,
dump_file,
le32_to_cpu(dump_file->file_len));
}
static int iwl_dbgfs_fw_error_dump_release(struct inode *inode,
struct file *file)
{
vfree(file->private_data);
return 0;
}
static ssize_t iwl_dbgfs_sram_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
......@@ -350,6 +396,9 @@ static ssize_t iwl_dbgfs_bt_notif_read(struct file *file, char __user *user_buf,
le32_to_cpu(notif->secondary_ch_lut));
pos += scnprintf(buf+pos, bufsz-pos, "bt_activity_grading = %d\n",
le32_to_cpu(notif->bt_activity_grading));
pos += scnprintf(buf+pos, bufsz-pos,
"antenna isolation = %d CORUN LUT index = %d\n",
mvm->last_ant_isol, mvm->last_corun_lut);
mutex_unlock(&mvm->mutex);
......@@ -392,6 +441,22 @@ static ssize_t iwl_dbgfs_bt_cmd_read(struct file *file, char __user *user_buf,
return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
}
static ssize_t
iwl_dbgfs_bt_tx_prio_write(struct iwl_mvm *mvm, char *buf,
size_t count, loff_t *ppos)
{
u32 bt_tx_prio;
if (sscanf(buf, "%u", &bt_tx_prio) != 1)
return -EINVAL;
if (bt_tx_prio > 4)
return -EINVAL;
mvm->bt_tx_prio = bt_tx_prio;
return count;
}
#define PRINT_STATS_LE32(_str, _val) \
pos += scnprintf(buf + pos, bufsz - pos, \
fmt_table, _str, \
......@@ -536,56 +601,60 @@ static ssize_t iwl_dbgfs_frame_stats_read(struct iwl_mvm *mvm,
loff_t *ppos,
struct iwl_mvm_frame_stats *stats)
{
char *buff;
int pos = 0, idx, i;
char *buff, *pos, *endpos;
int idx, i;
int ret;
size_t bufsz = 1024;
static const size_t bufsz = 1024;
buff = kmalloc(bufsz, GFP_KERNEL);
if (!buff)
return -ENOMEM;
spin_lock_bh(&mvm->drv_stats_lock);
pos += scnprintf(buff + pos, bufsz - pos,
pos = buff;
endpos = pos + bufsz;
pos += scnprintf(pos, endpos - pos,
"Legacy/HT/VHT\t:\t%d/%d/%d\n",
stats->legacy_frames,
stats->ht_frames,
stats->vht_frames);
pos += scnprintf(buff + pos, bufsz - pos, "20/40/80\t:\t%d/%d/%d\n",
pos += scnprintf(pos, endpos - pos, "20/40/80\t:\t%d/%d/%d\n",
stats->bw_20_frames,
stats->bw_40_frames,
stats->bw_80_frames);
pos += scnprintf(buff + pos, bufsz - pos, "NGI/SGI\t\t:\t%d/%d\n",
pos += scnprintf(pos, endpos - pos, "NGI/SGI\t\t:\t%d/%d\n",
stats->ngi_frames,
stats->sgi_frames);
pos += scnprintf(buff + pos, bufsz - pos, "SISO/MIMO2\t:\t%d/%d\n",
pos += scnprintf(pos, endpos - pos, "SISO/MIMO2\t:\t%d/%d\n",
stats->siso_frames,
stats->mimo2_frames);
pos += scnprintf(buff + pos, bufsz - pos, "FAIL/SCSS\t:\t%d/%d\n",
pos += scnprintf(pos, endpos - pos, "FAIL/SCSS\t:\t%d/%d\n",
stats->fail_frames,
stats->success_frames);
pos += scnprintf(buff + pos, bufsz - pos, "MPDUs agg\t:\t%d\n",
pos += scnprintf(pos, endpos - pos, "MPDUs agg\t:\t%d\n",
stats->agg_frames);
pos += scnprintf(buff + pos, bufsz - pos, "A-MPDUs\t\t:\t%d\n",
pos += scnprintf(pos, endpos - pos, "A-MPDUs\t\t:\t%d\n",
stats->ampdu_count);
pos += scnprintf(buff + pos, bufsz - pos, "Avg MPDUs/A-MPDU:\t%d\n",
pos += scnprintf(pos, endpos - pos, "Avg MPDUs/A-MPDU:\t%d\n",
stats->ampdu_count > 0 ?
(stats->agg_frames / stats->ampdu_count) : 0);
pos += scnprintf(buff + pos, bufsz - pos, "Last Rates\n");
pos += scnprintf(pos, endpos - pos, "Last Rates\n");
idx = stats->last_frame_idx - 1;
for (i = 0; i < ARRAY_SIZE(stats->last_rates); i++) {
idx = (idx + 1) % ARRAY_SIZE(stats->last_rates);
if (stats->last_rates[idx] == 0)
continue;
pos += scnprintf(buff + pos, bufsz - pos, "Rate[%d]: ",
pos += scnprintf(pos, endpos - pos, "Rate[%d]: ",
(int)(ARRAY_SIZE(stats->last_rates) - i));
pos += rs_pretty_print_rate(buff + pos, stats->last_rates[idx]);
pos += rs_pretty_print_rate(pos, stats->last_rates[idx]);
}
spin_unlock_bh(&mvm->drv_stats_lock);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos);
ret = simple_read_from_buffer(user_buf, count, ppos, buff, pos - buff);
kfree(buff);
return ret;
......@@ -1032,9 +1101,16 @@ MVM_DEBUGFS_READ_FILE_OPS(fw_rx_stats);
MVM_DEBUGFS_READ_FILE_OPS(drv_rx_stats);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_restart, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(fw_nmi, 10);
MVM_DEBUGFS_WRITE_FILE_OPS(bt_tx_prio, 10);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(scan_ant_rxchain, 8);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(d0i3_refs, 8);
static const struct file_operations iwl_dbgfs_fw_error_dump_ops = {
.open = iwl_dbgfs_fw_error_dump_open,
.read = iwl_dbgfs_fw_error_dump_read,
.release = iwl_dbgfs_fw_error_dump_release,
};
#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters, 256);
MVM_DEBUGFS_READ_WRITE_FILE_OPS(bcast_filters_macs, 256);
......@@ -1049,12 +1125,15 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
struct dentry *bcast_dir __maybe_unused;
char buf[100];
spin_lock_init(&mvm->drv_stats_lock);
mvm->debugfs_dir = dbgfs_dir;
MVM_DEBUGFS_ADD_FILE(tx_flush, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sta_drain, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(sram, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(stations, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_error_dump, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_notif, dbgfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(bt_cmd, dbgfs_dir, S_IRUSR);
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_DEVICE_PS_CMD)
......@@ -1064,6 +1143,7 @@ int iwl_mvm_dbgfs_register(struct iwl_mvm *mvm, struct dentry *dbgfs_dir)
MVM_DEBUGFS_ADD_FILE(drv_rx_stats, mvm->debugfs_dir, S_IRUSR);
MVM_DEBUGFS_ADD_FILE(fw_restart, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(fw_nmi, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(bt_tx_prio, mvm->debugfs_dir, S_IWUSR);
MVM_DEBUGFS_ADD_FILE(scan_ant_rxchain, mvm->debugfs_dir,
S_IWUSR | S_IRUSR);
MVM_DEBUGFS_ADD_FILE(prph_reg, mvm->debugfs_dir, S_IWUSR | S_IRUSR);
......
......@@ -77,6 +77,8 @@
* @BT_COEX_3W:
* @BT_COEX_NW:
* @BT_COEX_SYNC2SCO:
* @BT_COEX_CORUNNING:
* @BT_COEX_MPLUT:
*
* The COEX_MODE must be set for each command. Even if it is not changed.
*/
......@@ -88,6 +90,8 @@ enum iwl_bt_coex_flags {
BT_COEX_3W = 0x2 << BT_COEX_MODE_POS,
BT_COEX_NW = 0x3 << BT_COEX_MODE_POS,
BT_COEX_SYNC2SCO = BIT(7),
BT_COEX_CORUNNING = BIT(8),
BT_COEX_MPLUT = BIT(9),
};
/*
......
......@@ -239,7 +239,7 @@ enum iwl_wowlan_wakeup_filters {
IWL_WOWLAN_WAKEUP_BCN_FILTERING = BIT(16),
}; /* WOWLAN_WAKEUP_FILTER_API_E_VER_4 */
struct iwl_wowlan_config_cmd {
struct iwl_wowlan_config_cmd_v2 {
__le32 wakeup_filter;
__le16 non_qos_seq;
__le16 qos_seq[8];
......@@ -247,6 +247,12 @@ struct iwl_wowlan_config_cmd {
u8 is_11n_connection;
} __packed; /* WOWLAN_CONFIG_API_S_VER_2 */
struct iwl_wowlan_config_cmd_v3 {
struct iwl_wowlan_config_cmd_v2 common;
u8 offloading_tid;
u8 reserved[3];
} __packed; /* WOWLAN_CONFIG_API_S_VER_3 */
/*
* WOWLAN_TSC_RSC_PARAMS
*/
......
......@@ -76,6 +76,8 @@
* @TX_CMD_FLG_VHT_NDPA: mark frame is NDPA for VHT beamformer sequence
* @TX_CMD_FLG_HT_NDPA: mark frame is NDPA for HT beamformer sequence
* @TX_CMD_FLG_CSI_FDBK2HOST: mark to send feedback to host (only if good CRC)
* @TX_CMD_FLG_BT_PRIO_POS: the position of the BT priority (bit 11 is ignored
* on old firmware versions).
* @TX_CMD_FLG_BT_DIS: disable BT priority for this frame
* @TX_CMD_FLG_SEQ_CTL: set if FW should override the sequence control.
* Should be set for mgmt, non-QOS data, mcast, bcast and in scan command
......@@ -107,6 +109,7 @@ enum iwl_tx_flags {
TX_CMD_FLG_VHT_NDPA = BIT(8),
TX_CMD_FLG_HT_NDPA = BIT(9),
TX_CMD_FLG_CSI_FDBK2HOST = BIT(10),
TX_CMD_FLG_BT_PRIO_POS = 11,
TX_CMD_FLG_BT_DIS = BIT(12),
TX_CMD_FLG_SEQ_CTL = BIT(13),
TX_CMD_FLG_MORE_FRAG = BIT(14),
......
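TX_CMD_FLG_BT_PRIO_POS is a bit position, not a BIT() mask, so the 2-bit priority computed by iwl_mvm_bt_coex_tx_prio() (added earlier in this series) is shifted into place; a minimal sketch of the intended use:
/* sketch: place the 0-3 coex priority at bits 11-12 of tx_flags */
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
	    TX_CMD_FLG_BT_PRIO_POS;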
......@@ -70,7 +70,7 @@
#include "fw-api-mac.h"
#include "fw-api-power.h"
#include "fw-api-d3.h"
#include "fw-api-bt-coex.h"
#include "fw-api-coex.h"
/* maximal number of Tx queues in any platform */
#define IWL_MVM_MAX_QUEUES 20
......@@ -95,6 +95,7 @@ enum {
/* PHY context commands */
PHY_CONTEXT_CMD = 0x8,
DBG_CFG = 0x9,
ANTENNA_COUPLING_NOTIFICATION = 0xa,
/* station table */
ADD_STA_KEY = 0x17,
......
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*****************************************************************************/
#ifndef __fw_error_dump_h__
#define __fw_error_dump_h__
#include <linux/types.h>
#define IWL_FW_ERROR_DUMP_BARKER 0x14789632
/**
* enum iwl_fw_error_dump_type - types of data in the dump file
* @IWL_FW_ERROR_DUMP_SRAM:
* @IWL_FW_ERROR_DUMP_REG:
*/
enum iwl_fw_error_dump_type {
IWL_FW_ERROR_DUMP_SRAM = 0,
IWL_FW_ERROR_DUMP_REG = 1,
IWL_FW_ERROR_DUMP_MAX,
};
/**
* struct iwl_fw_error_dump_data - data for one type
* @type: %enum iwl_fw_error_dump_type
* @len: the length starting from %data - must be a multiple of 4.
* @data: the data itself, padded to a multiple of 4.
*/
struct iwl_fw_error_dump_data {
__le32 type;
__le32 len;
__u8 data[];
} __packed __aligned(4);
/**
* struct iwl_fw_error_dump_file - the layout of the header of the file
* @barker: must be %IWL_FW_ERROR_DUMP_BARKER
* @file_len: the length of the whole file, starting from %barker
* @data: array of %struct iwl_fw_error_dump_data
*/
struct iwl_fw_error_dump_file {
__le32 barker;
__le32 file_len;
u8 data[0];
} __packed __aligned(4);
#endif /* __fw_error_dump_h__ */
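A dump file is the fixed header followed by a sequence of type/len/data records; a hedged sketch of walking one buffer (no bounds validation, illustration only):
/* sketch: iterate the TLV records of a complete dump buffer */
static void iwl_fw_error_dump_walk(const struct iwl_fw_error_dump_file *f)
{
	const u8 *pos = f->data;
	const u8 *end = (const u8 *)f + le32_to_cpu(f->file_len);

	while (pos < end) {
		const struct iwl_fw_error_dump_data *d = (const void *)pos;

		/* d->type is an enum iwl_fw_error_dump_type value */
		pos = d->data + le32_to_cpu(d->len);
	}
}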
......@@ -94,6 +94,8 @@ int iwl_mvm_leds_init(struct iwl_mvm *mvm)
int ret;
switch (mode) {
case IWL_LED_BLINK:
IWL_ERR(mvm, "Blink LED mode not supported, using default\n");
case IWL_LED_DEFAULT:
case IWL_LED_RF_STATE:
mode = IWL_LED_RF_STATE;
......
......@@ -205,7 +205,7 @@ static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!mvm->trans->cfg->d0i3)
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
......@@ -215,7 +215,7 @@ void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
if (!mvm->trans->cfg->d0i3)
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
......@@ -228,7 +228,7 @@ iwl_mvm_unref_all_except(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref)
{
int i;
if (!mvm->trans->cfg->d0i3)
if (!iwl_mvm_is_d0i3_supported(mvm))
return;
for_each_set_bit(i, mvm->ref_bitmap, IWL_MVM_REF_COUNT) {
......@@ -295,7 +295,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
!iwlwifi_mod_params.sw_crypto)
hw->flags |= IEEE80211_HW_MFP_CAPABLE;
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT) {
hw->flags |= IEEE80211_HW_SUPPORTS_UAPSD;
hw->uapsd_queues = IWL_UAPSD_AC_INFO;
hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
......@@ -365,7 +365,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
else
hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;
if (0 && mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
if (mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_SCHED_SCAN) {
hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
......@@ -375,8 +375,7 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
}
hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
NL80211_FEATURE_P2P_GO_OPPPS |
NL80211_FEATURE_LOW_PRIORITY_SCAN;
NL80211_FEATURE_P2P_GO_OPPPS;
mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;
......@@ -424,6 +423,47 @@ int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
return ret;
}
static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
struct ieee80211_sta *sta,
struct sk_buff *skb)
{
struct iwl_mvm_sta *mvmsta;
bool defer = false;
/*
* double check the IN_D0I3 flag both before and after
* taking the spinlock, in order to prevent taking
* the spinlock when not needed.
*/
if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
return false;
spin_lock(&mvm->d0i3_tx_lock);
/*
* testing the flag again ensures the skb dequeue
* loop (on d0i3 exit) hasn't run yet.
*/
if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
goto out;
mvmsta = iwl_mvm_sta_from_mac80211(sta);
if (mvmsta->sta_id == IWL_MVM_STATION_COUNT ||
mvmsta->sta_id != mvm->d0i3_ap_sta_id)
goto out;
__skb_queue_tail(&mvm->d0i3_tx, skb);
ieee80211_stop_queues(mvm->hw);
/* trigger wakeup */
iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
iwl_mvm_unref(mvm, IWL_MVM_REF_TX);
defer = true;
out:
spin_unlock(&mvm->d0i3_tx_lock);
return defer;
}
static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_control *control,
struct sk_buff *skb)
......@@ -451,6 +491,8 @@ static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
sta = NULL;
if (sta) {
if (iwl_mvm_defer_tx(mvm, sta, skb))
return;
if (iwl_mvm_tx_skb(mvm, skb, sta))
goto drop;
return;
......@@ -489,6 +531,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
bool tx_agg_ref = false;
IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
sta->addr, tid, action);
......@@ -496,6 +539,23 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
if (!(mvm->nvm_data->sku_cap_11n_enable))
return -EACCES;
/* return from D0i3 before starting a new Tx aggregation */
if (action == IEEE80211_AMPDU_TX_START) {
iwl_mvm_ref(mvm, IWL_MVM_REF_TX_AGG);
tx_agg_ref = true;
/*
* wait synchronously until D0i3 exit to get the correct
* sequence number for the tid
*/
if (!wait_event_timeout(mvm->d0i3_exit_waitq,
!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status), HZ)) {
WARN_ON_ONCE(1);
iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
return -EIO;
}
}
mutex_lock(&mvm->mutex);
switch (action) {
......@@ -533,6 +593,13 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
}
mutex_unlock(&mvm->mutex);
/*
* If the tid is marked as started, we won't use it for offloaded
* traffic on the next D0i3 entry. It's safe to unref.
*/
if (tx_agg_ref)
iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);
return ret;
}
......@@ -557,6 +624,15 @@ static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
#ifdef CONFIG_IWLWIFI_DEBUGFS
static char *env[] = { "DRIVER=iwlwifi", "EVENT=error_dump", NULL };
iwl_mvm_fw_error_dump(mvm);
/* notify the userspace about the error we had */
kobject_uevent_env(&mvm->hw->wiphy->dev.kobj, KOBJ_CHANGE, env);
#endif
iwl_trans_stop_device(mvm->trans);
mvm->scan_status = IWL_MVM_SCAN_NONE;
......@@ -610,6 +686,7 @@ static void iwl_mvm_mac_restart_complete(struct ieee80211_hw *hw)
mutex_lock(&mvm->mutex);
clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
iwl_mvm_d0i3_enable_tx(mvm, NULL);
ret = iwl_mvm_update_quotas(mvm, NULL);
if (ret)
IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
......@@ -1255,6 +1332,7 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm,
*/
iwl_mvm_remove_time_event(mvm, mvmvif,
&mvmvif->time_event_data);
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
} else if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS |
BSS_CHANGED_QOS)) {
ret = iwl_mvm_power_update_mac(mvm, vif);
......@@ -1437,8 +1515,6 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
struct cfg80211_scan_request *req)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
struct iwl_notification_wait wait_scan_done;
static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
int ret;
if (req->n_channels == 0 || req->n_channels > MAX_NUM_SCAN_CHANNELS)
......@@ -1448,22 +1524,11 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
switch (mvm->scan_status) {
case IWL_MVM_SCAN_SCHED:
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
NULL, NULL);
iwl_mvm_sched_scan_stop(mvm);
ret = iwl_wait_notification(&mvm->notif_wait,
&wait_scan_done, HZ);
ret = iwl_mvm_sched_scan_stop(mvm);
if (ret) {
ret = -EBUSY;
goto out;
}
/* iwl_mvm_rx_scan_offload_complete_notif() will be called
* soon but will not reset the scan status as it won't be
* IWL_MVM_SCAN_SCHED any more since we queue the next scan
* immediately (below)
*/
break;
case IWL_MVM_SCAN_NONE:
break;
......@@ -1479,7 +1544,8 @@ static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw,
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
out:
mutex_unlock(&mvm->mutex);
/* make sure to flush the Rx handler before the next scan arrives */
iwl_mvm_wait_for_async_handlers(mvm);
return ret;
}
......@@ -1641,7 +1707,9 @@ static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw,
} else if (old_state == IEEE80211_STA_ASSOC &&
new_state == IEEE80211_STA_AUTHORIZED) {
/* enable beacon filtering */
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, CMD_SYNC));
if (vif->bss_conf.dtim_period)
WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif,
CMD_SYNC));
ret = 0;
} else if (old_state == IEEE80211_STA_AUTHORIZED &&
new_state == IEEE80211_STA_ASSOC) {
......@@ -1738,9 +1806,26 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mutex_lock(&mvm->mutex);
if (mvm->scan_status != IWL_MVM_SCAN_NONE) {
IWL_DEBUG_SCAN(mvm,
"SCHED SCAN request during internal scan - abort\n");
switch (mvm->scan_status) {
case IWL_MVM_SCAN_OS:
IWL_DEBUG_SCAN(mvm, "Stopping previous scan for sched_scan\n");
ret = iwl_mvm_cancel_scan(mvm);
if (ret) {
ret = -EBUSY;
goto out;
}
/*
* iwl_mvm_rx_scan_complete() will be called soon but will
* not reset the scan status as it won't be IWL_MVM_SCAN_OS
* any more since we queue the next scan immediately (below).
* We make sure it is called before the next scan starts by
* flushing the async-handlers work.
*/
break;
case IWL_MVM_SCAN_NONE:
break;
default:
ret = -EBUSY;
goto out;
}
......@@ -1762,6 +1847,8 @@ static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw,
mvm->scan_status = IWL_MVM_SCAN_NONE;
out:
mutex_unlock(&mvm->mutex);
/* make sure to flush the Rx handler before the next scan arrives */
iwl_mvm_wait_for_async_handlers(mvm);
return ret;
}
......@@ -1769,12 +1856,14 @@ static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
int ret;
mutex_lock(&mvm->mutex);
iwl_mvm_sched_scan_stop(mvm);
ret = iwl_mvm_sched_scan_stop(mvm);
mutex_unlock(&mvm->mutex);
iwl_mvm_wait_for_async_handlers(mvm);
return 0;
return ret;
}
static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw,
......
......@@ -230,6 +230,8 @@ enum iwl_mvm_ref_type {
IWL_MVM_REF_P2P_CLIENT,
IWL_MVM_REF_AP_IBSS,
IWL_MVM_REF_USER,
IWL_MVM_REF_TX,
IWL_MVM_REF_TX_AGG,
IWL_MVM_REF_COUNT,
};
......@@ -317,13 +319,13 @@ struct iwl_mvm_vif {
bool seqno_valid;
u16 seqno;
#endif
#if IS_ENABLED(CONFIG_IPV6)
/* IPv6 addresses for WoWLAN */
struct in6_addr target_ipv6_addrs[IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_MAX];
int num_target_ipv6_addrs;
#endif
#endif
#ifdef CONFIG_IWLWIFI_DEBUGFS
struct iwl_mvm *mvm;
......@@ -346,6 +348,8 @@ iwl_mvm_vif_from_mac80211(struct ieee80211_vif *vif)
return (void *)vif->drv_priv;
}
extern const u8 tid_to_mac80211_ac[];
enum iwl_scan_status {
IWL_MVM_SCAN_NONE,
IWL_MVM_SCAN_OS,
......@@ -571,6 +575,9 @@ struct iwl_mvm {
/* -1 for always, 0 for never, >0 for that many times */
s8 restart_fw;
void *fw_error_dump;
void *fw_error_sram;
u32 fw_error_sram_len;
struct led_classdev led;
......@@ -591,12 +598,20 @@ struct iwl_mvm {
/* d0i3 */
u8 d0i3_ap_sta_id;
bool d0i3_offloading;
struct work_struct d0i3_exit_work;
struct sk_buff_head d0i3_tx;
/* sync d0i3_tx queue and IWL_MVM_STATUS_IN_D0I3 status flag */
spinlock_t d0i3_tx_lock;
wait_queue_head_t d0i3_exit_waitq;
/* BT-Coex */
u8 bt_kill_msk;
struct iwl_bt_coex_profile_notif last_bt_notif;
struct iwl_bt_coex_ci_cmd last_bt_ci_cmd;
u32 last_ant_isol;
u8 last_corun_lut;
u8 bt_tx_prio;
/* Thermal Throttling and CTkill */
struct iwl_mvm_tt_mgmt thermal_throttle;
......@@ -630,6 +645,7 @@ enum iwl_mvm_status {
IWL_MVM_STATUS_HW_CTKILL,
IWL_MVM_STATUS_ROC_RUNNING,
IWL_MVM_STATUS_IN_HW_RESTART,
IWL_MVM_STATUS_IN_D0I3,
};
static inline bool iwl_mvm_is_radio_killed(struct iwl_mvm *mvm)
......@@ -656,6 +672,12 @@ iwl_mvm_sta_from_staid_protected(struct iwl_mvm *mvm, u8 sta_id)
return iwl_mvm_sta_from_mac80211(sta);
}
static inline bool iwl_mvm_is_d0i3_supported(struct iwl_mvm *mvm)
{
return mvm->trans->cfg->d0i3 &&
(mvm->fw->ucode_capa.capa[0] & IWL_UCODE_TLV_CAPA_D0I3_SUPPORT);
}
extern const u8 iwl_mvm_ac_to_tx_fifo[];
struct iwl_rate_info {
......@@ -680,7 +702,10 @@ void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
struct ieee80211_tx_rate *r);
u8 iwl_mvm_mac80211_idx_to_hwrate(int rate_idx);
void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm);
void iwl_mvm_dump_sram(struct iwl_mvm *mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm);
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm);
#endif
u8 first_antenna(u8 mask);
u8 iwl_mvm_next_antenna(struct iwl_mvm *mvm, u8 valid, u8 last_idx);
......@@ -706,6 +731,11 @@ static inline const char *iwl_mvm_get_tx_fail_reason(u32 status) { return ""; }
int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk, bool sync);
void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm);
static inline void iwl_mvm_wait_for_async_handlers(struct iwl_mvm *mvm)
{
flush_work(&mvm->async_handlers_wk);
}
/* Statistics */
int iwl_mvm_rx_reply_statistics(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
......@@ -739,6 +769,9 @@ int iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_radio_ver(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_ant_coupling_notif(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_fw_error(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_card_state_notif(struct iwl_mvm *mvm,
......@@ -793,7 +826,7 @@ int iwl_mvm_rx_scan_response(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
void iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm);
/* Scheduled scan */
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
......@@ -807,7 +840,7 @@ int iwl_mvm_config_sched_scan_profiles(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
int iwl_mvm_sched_scan_start(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req);
void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm);
int iwl_mvm_rx_sched_scan_results(struct iwl_mvm *mvm,
struct iwl_rx_cmd_buffer *rxb,
struct iwl_device_cmd *cmd);
......@@ -878,10 +911,17 @@ iwl_mvm_set_last_nonqos_seq(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
}
#endif
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd_v2 *cmd);
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
u32 cmd_flags);
/* D0i3 */
void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type);
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq);
/* BT Coex */
int iwl_send_bt_prio_tbl(struct iwl_mvm *mvm);
......@@ -892,10 +932,12 @@ int iwl_mvm_rx_bt_coex_notif(struct iwl_mvm *mvm,
void iwl_mvm_bt_rssi_event(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
enum ieee80211_rssi_event rssi_event);
void iwl_mvm_bt_coex_vif_change(struct iwl_mvm *mvm);
u16 iwl_mvm_bt_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
u16 iwl_mvm_coex_agg_time_limit(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
bool iwl_mvm_bt_coex_is_mimo_allowed(struct iwl_mvm *mvm,
struct ieee80211_sta *sta);
u8 iwl_mvm_bt_coex_tx_prio(struct iwl_mvm *mvm, struct ieee80211_hdr *hdr,
struct ieee80211_tx_info *info, u8 ac);
int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id, bool enable);
enum iwl_bt_kill_msk {
......@@ -942,6 +984,8 @@ void iwl_mvm_update_smps(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
/* Low latency */
int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
bool value);
/* get SystemLowLatencyMode - only needed for beacon threshold? */
bool iwl_mvm_low_latency(struct iwl_mvm *mvm);
/* get VMACLowLatencyMode */
static inline bool iwl_mvm_vif_low_latency(struct iwl_mvm_vif *mvmvif)
{
......
/******************************************************************************
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
*
* GPL LICENSE SUMMARY
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but
* WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110,
* USA
*
* The full GNU General Public License is included in this distribution
* in the file called COPYING.
*
* Contact Information:
* Intel Linux Wireless <ilw@linux.intel.com>
* Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
*
* BSD LICENSE
*
* Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in
* the documentation and/or other materials provided with the
* distribution.
* * Neither the name Intel Corporation nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
* A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
* OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*****************************************************************************/
#include <net/ipv6.h>
#include <net/addrconf.h>
#include "mvm.h"
void iwl_mvm_set_wowlan_qos_seq(struct iwl_mvm_sta *mvm_ap_sta,
struct iwl_wowlan_config_cmd_v2 *cmd)
{
int i;
/*
* For QoS counters, we store the value to use next, so subtract 0x10:
* the uCode adds 0x10 *before* using the value, while we increment
* *after* using it (i.e. we store the next value to use).
*/
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = mvm_ap_sta->tid_data[i].seq_number;
seq -= 0x10;
cmd->qos_seq[i] = cpu_to_le16(seq);
}
}
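As an aside for reviewers: a minimal standalone sketch (not driver code) of the +/-0x10 round trip described in the comment above, matching the "seq += 0x10" restore in the D0i3 exit path further down. The sample value is made up; 802.11 sequence numbers advance in steps of 0x10 because the low 4 bits carry the fragment number.
#include <assert.h>
#include <stdint.h>
#include <stdio.h>
int main(void)
{
	uint16_t driver_next = 0x0120;	/* driver stores the next seq to use */
	/* entering D0i3/WoWLAN: hand the uCode "last used" = next - 0x10 */
	uint16_t fw_last = driver_next - 0x10;
	/* the uCode adds 0x10 before transmitting its first frame... */
	uint16_t fw_first_tx = fw_last + 0x10;
	assert(fw_first_tx == driver_next);	/* ...so no seq is reused */
	/* on exit, the fw reports the last-used value and the driver
	 * stores the next one - hence the "seq += 0x10" in the exit path */
	driver_next = fw_first_tx + 0x10;
	printf("next seq after resume: 0x%04x\n", driver_next);
	return 0;
}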
int iwl_mvm_send_proto_offload(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
bool disable_offloading,
u32 cmd_flags)
{
union {
struct iwl_proto_offload_cmd_v1 v1;
struct iwl_proto_offload_cmd_v2 v2;
struct iwl_proto_offload_cmd_v3_small v3s;
struct iwl_proto_offload_cmd_v3_large v3l;
} cmd = {};
struct iwl_host_cmd hcmd = {
.id = PROT_OFFLOAD_CONFIG_CMD,
.flags = cmd_flags,
.data[0] = &cmd,
.dataflags[0] = IWL_HCMD_DFL_DUP,
};
struct iwl_proto_offload_cmd_common *common;
u32 enabled = 0, size;
u32 capa_flags = mvm->fw->ucode_capa.flags;
#if IS_ENABLED(CONFIG_IPV6)
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
int i;
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL ||
capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
struct iwl_ns_config *nsc;
struct iwl_targ_addr *addrs;
int n_nsc, n_addrs;
int c;
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
nsc = cmd.v3s.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3S;
addrs = cmd.v3s.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3S;
} else {
nsc = cmd.v3l.ns_config;
n_nsc = IWL_PROTO_OFFLOAD_NUM_NS_CONFIG_V3L;
addrs = cmd.v3l.targ_addrs;
n_addrs = IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V3L;
}
if (mvmvif->num_target_ipv6_addrs)
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
/*
* For each address we have (and that will fit) fill a target
* address struct and combine for NS offload structs with the
* solicited node addresses.
*/
for (i = 0, c = 0;
i < mvmvif->num_target_ipv6_addrs &&
i < n_addrs && c < n_nsc; i++) {
struct in6_addr solicited_addr;
int j;
addrconf_addr_solict_mult(&mvmvif->target_ipv6_addrs[i],
&solicited_addr);
for (j = 0; j < c; j++)
if (ipv6_addr_cmp(&nsc[j].dest_ipv6_addr,
&solicited_addr) == 0)
break;
if (j == c)
c++;
addrs[i].addr = mvmvif->target_ipv6_addrs[i];
addrs[i].config_num = cpu_to_le32(j);
nsc[j].dest_ipv6_addr = solicited_addr;
memcpy(nsc[j].target_mac_addr, vif->addr, ETH_ALEN);
}
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL)
cmd.v3s.num_valid_ipv6_addrs = cpu_to_le32(i);
else
cmd.v3l.num_valid_ipv6_addrs = cpu_to_le32(i);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v2.ndp_mac_addr, vif->addr, ETH_ALEN);
}
BUILD_BUG_ON(sizeof(cmd.v2.target_ipv6_addr[0]) !=
sizeof(mvmvif->target_ipv6_addrs[0]));
for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V2); i++)
memcpy(cmd.v2.target_ipv6_addr[i],
&mvmvif->target_ipv6_addrs[i],
sizeof(cmd.v2.target_ipv6_addr[i]));
} else {
if (mvmvif->num_target_ipv6_addrs) {
enabled |= IWL_D3_PROTO_OFFLOAD_NS;
memcpy(cmd.v1.ndp_mac_addr, vif->addr, ETH_ALEN);
}
BUILD_BUG_ON(sizeof(cmd.v1.target_ipv6_addr[0]) !=
sizeof(mvmvif->target_ipv6_addrs[0]));
for (i = 0; i < min(mvmvif->num_target_ipv6_addrs,
IWL_PROTO_OFFLOAD_NUM_IPV6_ADDRS_V1); i++)
memcpy(cmd.v1.target_ipv6_addr[i],
&mvmvif->target_ipv6_addrs[i],
sizeof(cmd.v1.target_ipv6_addr[i]));
}
#endif
if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_SMALL) {
common = &cmd.v3s.common;
size = sizeof(cmd.v3s);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_NEW_NSOFFL_LARGE) {
common = &cmd.v3l.common;
size = sizeof(cmd.v3l);
} else if (capa_flags & IWL_UCODE_TLV_FLAGS_D3_6_IPV6_ADDRS) {
common = &cmd.v2.common;
size = sizeof(cmd.v2);
} else {
common = &cmd.v1.common;
size = sizeof(cmd.v1);
}
if (vif->bss_conf.arp_addr_cnt) {
enabled |= IWL_D3_PROTO_OFFLOAD_ARP;
common->host_ipv4_addr = vif->bss_conf.arp_addr_list[0];
memcpy(common->arp_mac_addr, vif->addr, ETH_ALEN);
}
if (!disable_offloading)
common->enabled = cpu_to_le32(enabled);
hcmd.len[0] = size;
return iwl_mvm_send_cmd(mvm, &hcmd);
}
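For context on the NS-offload loop above: a small standalone sketch (POSIX userspace, not kernel code) of the solicited-node multicast derivation that addrconf_addr_solict_mult() performs - ff02::1:ff00:0/104 with the low 24 bits copied from the target address. The sample address is made up.
#include <arpa/inet.h>
#include <stdio.h>
#include <string.h>
int main(void)
{
	struct in6_addr uni, sol;
	char buf[INET6_ADDRSTRLEN];
	inet_pton(AF_INET6, "2001:db8::abcd:1234", &uni);
	memset(&sol, 0, sizeof(sol));
	sol.s6_addr[0] = 0xff;		/* ff02::1:ff00:0/104 prefix */
	sol.s6_addr[1] = 0x02;
	sol.s6_addr[11] = 0x01;
	sol.s6_addr[12] = 0xff;
	memcpy(&sol.s6_addr[13], &uni.s6_addr[13], 3);	/* low 24 bits */
	/* prints ff02::1:ffcd:1234 - two target addresses sharing the
	 * same low 24 bits collapse into one ns_config entry, which is
	 * exactly what the j == c bookkeeping above tracks */
	printf("%s\n", inet_ntop(AF_INET6, &sol, buf, sizeof(buf)));
	return 0;
}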
......@@ -61,6 +61,7 @@
*
*****************************************************************************/
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <net/mac80211.h>
#include "iwl-notif-wait.h"
......@@ -78,6 +79,7 @@
#include "iwl-prph.h"
#include "rs.h"
#include "fw-api-scan.h"
#include "fw-error-dump.h"
#include "time-event.h"
/*
......@@ -220,13 +222,15 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif, true),
RX_HANDLER(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif, false),
RX_HANDLER(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics, true),
RX_HANDLER(ANTENNA_COUPLING_NOTIFICATION,
iwl_mvm_rx_ant_coupling_notif, true),
RX_HANDLER(TIME_EVENT_NOTIFICATION, iwl_mvm_rx_time_event_notif, false),
RX_HANDLER(EOSP_NOTIFICATION, iwl_mvm_rx_eosp_notif, false),
RX_HANDLER(SCAN_REQUEST_CMD, iwl_mvm_rx_scan_response, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, false),
RX_HANDLER(SCAN_COMPLETE_NOTIFICATION, iwl_mvm_rx_scan_complete, true),
RX_HANDLER(SCAN_OFFLOAD_COMPLETE,
iwl_mvm_rx_scan_offload_complete_notif, true),
RX_HANDLER(MATCH_FOUND_NOTIFICATION, iwl_mvm_rx_sched_scan_results,
......@@ -321,6 +325,7 @@ static const char *const iwl_mvm_cmd_strings[REPLY_MAX] = {
CMD(MAC_PM_POWER_TABLE),
CMD(BT_COEX_CI),
CMD(PSM_UAPSD_AP_MISBEHAVING_NOTIFICATION),
CMD(ANTENNA_COUPLING_NOTIFICATION),
};
#undef CMD
......@@ -407,6 +412,10 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_WORK(&mvm->sta_drained_wk, iwl_mvm_sta_drained_wk);
INIT_WORK(&mvm->d0i3_exit_work, iwl_mvm_d0i3_exit_work);
spin_lock_init(&mvm->d0i3_tx_lock);
skb_queue_head_init(&mvm->d0i3_tx);
init_waitqueue_head(&mvm->d0i3_exit_waitq);
SET_IEEE80211_DEV(mvm->hw, mvm->trans->dev);
/*
......@@ -527,6 +536,8 @@ static void iwl_op_mode_mvm_stop(struct iwl_op_mode *op_mode)
ieee80211_unregister_hw(mvm->hw);
kfree(mvm->scan_cmd);
vfree(mvm->fw_error_dump);
kfree(mvm->fw_error_sram);
kfree(mvm->mcast_filter_cmd);
mvm->mcast_filter_cmd = NULL;
......@@ -690,7 +701,7 @@ void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
}
static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
......@@ -699,9 +710,9 @@ static void iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
else
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
if (state && mvm->cur_ucode != IWL_UCODE_INIT)
iwl_trans_stop_device(mvm->trans);
wiphy_rfkill_set_hw_state(mvm->hw->wiphy, iwl_mvm_is_radio_killed(mvm));
return state && mvm->cur_ucode != IWL_UCODE_INIT;
}
static void iwl_mvm_free_skb(struct iwl_op_mode *op_mode, struct sk_buff *skb)
......@@ -797,13 +808,52 @@ static void iwl_mvm_nic_restart(struct iwl_mvm *mvm)
}
}
#ifdef CONFIG_IWLWIFI_DEBUGFS
void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
{
struct iwl_fw_error_dump_file *dump_file;
struct iwl_fw_error_dump_data *dump_data;
u32 file_len;
lockdep_assert_held(&mvm->mutex);
if (mvm->fw_error_dump)
return;
file_len = mvm->fw_error_sram_len +
sizeof(*dump_file) +
sizeof(*dump_data);
dump_file = vmalloc(file_len);
if (!dump_file)
return;
mvm->fw_error_dump = dump_file;
dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER);
dump_file->file_len = cpu_to_le32(file_len);
dump_data = (void *)dump_file->data;
dump_data->type = IWL_FW_ERROR_DUMP_SRAM;
dump_data->len = cpu_to_le32(mvm->fw_error_sram_len);
/*
* No need for a lock since at this stage the FW isn't loaded, so it
* can't assert - we are the only ones who can possibly be accessing
* mvm->fw_error_sram right now.
*/
memcpy(dump_data->data, mvm->fw_error_sram, mvm->fw_error_sram_len);
}
#endif
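A standalone sketch of the buffer layout assembled above: a file header (barker + total length) followed by a single data record carrying the SRAM snapshot. The struct definitions, record type, and barker value here are local stand-ins, not the driver's fw-error-dump.h definitions, and the cpu_to_le32() conversions are omitted.
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
struct dump_file { uint32_t barker, file_len; uint8_t data[]; };
struct dump_data { uint32_t type, len; uint8_t data[]; };
int main(void)
{
	uint8_t sram[64] = { 0xaa };	/* pretend SRAM snapshot */
	uint32_t file_len = sizeof(struct dump_file) +
			    sizeof(struct dump_data) + sizeof(sram);
	struct dump_file *f = calloc(1, file_len);
	struct dump_data *d;
	if (!f)
		return 1;
	f->barker = 0xdeadbeef;		/* stand-in for the real barker */
	f->file_len = file_len;
	d = (struct dump_data *)f->data;
	d->type = 1;			/* stand-in for the SRAM record type */
	d->len = sizeof(sram);
	memcpy(d->data, sram, sizeof(sram));
	printf("dump file of %u bytes built\n", f->file_len);
	free(f);
	return 0;
}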
static void iwl_mvm_nic_error(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
iwl_mvm_dump_nic_error_log(mvm);
if (!mvm->restart_fw)
iwl_mvm_dump_sram(mvm);
#ifdef CONFIG_IWLWIFI_DEBUGFS
iwl_mvm_fw_error_sram_dump(mvm);
#endif
iwl_mvm_nic_restart(mvm);
}
......@@ -820,8 +870,62 @@ struct iwl_d0i3_iter_data {
struct iwl_mvm *mvm;
u8 ap_sta_id;
u8 vif_count;
u8 offloading_tid;
bool disable_offloading;
};
static bool iwl_mvm_disallow_offloading(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct iwl_d0i3_iter_data *iter_data)
{
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvmsta;
u32 available_tids = 0;
u8 tid;
if (WARN_ON(vif->type != NL80211_IFTYPE_STATION ||
mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT))
return false;
ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[mvmvif->ap_sta_id]);
if (IS_ERR_OR_NULL(ap_sta))
return false;
mvmsta = iwl_mvm_sta_from_mac80211(ap_sta);
spin_lock_bh(&mvmsta->lock);
for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) {
struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
/*
* in case of pending tx packets, don't use this tid
* for offloading in order to prevent reuse of the same
* qos seq counters.
*/
if (iwl_mvm_tid_queued(tid_data))
continue;
if (tid_data->state != IWL_AGG_OFF)
continue;
available_tids |= BIT(tid);
}
spin_unlock_bh(&mvmsta->lock);
/*
* disallow protocol offloading if we have no available tid
* (i.e. no tid with no pending frames and no active aggregation);
* we don't handle "holes" properly - the scheduler needs the
* frame's seq number and TFD index to match
*/
if (!available_tids)
return true;
/* for simplicity, just use the first available tid */
iter_data->offloading_tid = ffs(available_tids) - 1;
return false;
}
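A tiny standalone sketch of the TID selection above: ffs() returns the 1-based index of the least significant set bit (0 for an empty mask), so ffs(available_tids) - 1 is the lowest usable TID. The bitmask below is made up.
#include <stdio.h>
#include <strings.h>	/* ffs() */
int main(void)
{
	unsigned available_tids = 0;
	/* suppose TIDs 3 and 5 had no pending frames and no aggregation */
	available_tids |= 1u << 3;
	available_tids |= 1u << 5;
	if (!available_tids)
		printf("no usable TID - offloading disallowed\n");
	else
		printf("offloading_tid = %d\n", ffs(available_tids) - 1);
	return 0;	/* prints "offloading_tid = 3" */
}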
static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
struct ieee80211_vif *vif)
{
......@@ -835,7 +939,16 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
!vif->bss_conf.assoc)
return;
/*
* in case of pending tx packets or active aggregations,
* avoid offloading features in order to prevent reuse of
* the same qos seq counters.
*/
if (iwl_mvm_disallow_offloading(mvm, vif, data))
data->disable_offloading = true;
iwl_mvm_update_d0i3_power_mode(mvm, vif, true, flags);
iwl_mvm_send_proto_offload(mvm, vif, data->disable_offloading, flags);
/*
* on init/association, mvm already configures POWER_TABLE_CMD
......@@ -847,6 +960,34 @@ static void iwl_mvm_enter_d0i3_iterator(void *_data, u8 *mac,
data->vif_count++;
}
static void iwl_mvm_set_wowlan_data(struct iwl_mvm *mvm,
struct iwl_wowlan_config_cmd_v3 *cmd,
struct iwl_d0i3_iter_data *iter_data)
{
struct ieee80211_sta *ap_sta;
struct iwl_mvm_sta *mvm_ap_sta;
if (iter_data->ap_sta_id == IWL_MVM_STATION_COUNT)
return;
rcu_read_lock();
ap_sta = rcu_dereference(mvm->fw_id_to_mac_id[iter_data->ap_sta_id]);
if (IS_ERR_OR_NULL(ap_sta))
goto out;
mvm_ap_sta = iwl_mvm_sta_from_mac80211(ap_sta);
cmd->common.is_11n_connection = ap_sta->ht_cap.ht_supported;
cmd->offloading_tid = iter_data->offloading_tid;
/*
* The d0i3 uCode takes care of the nonqos counters,
* so configure only the qos seq ones.
*/
iwl_mvm_set_wowlan_qos_seq(mvm_ap_sta, &cmd->common);
out:
rcu_read_unlock();
}
static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
{
struct iwl_mvm *mvm = IWL_OP_MODE_GET_MVM(op_mode);
......@@ -855,11 +996,14 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
struct iwl_d0i3_iter_data d0i3_iter_data = {
.mvm = mvm,
};
struct iwl_wowlan_config_cmd wowlan_config_cmd = {
.wakeup_filter = cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE |
IWL_WOWLAN_WAKEUP_BCN_FILTERING),
struct iwl_wowlan_config_cmd_v3 wowlan_config_cmd = {
.common = {
.wakeup_filter =
cpu_to_le32(IWL_WOWLAN_WAKEUP_RX_FRAME |
IWL_WOWLAN_WAKEUP_BEACON_MISS |
IWL_WOWLAN_WAKEUP_LINK_CHANGE |
IWL_WOWLAN_WAKEUP_BCN_FILTERING),
},
};
struct iwl_d3_manager_config d3_cfg_cmd = {
.min_sleep_time = cpu_to_le32(1000),
......@@ -867,17 +1011,24 @@ static int iwl_mvm_enter_d0i3(struct iwl_op_mode *op_mode)
IWL_DEBUG_RPM(mvm, "MVM entering D0i3\n");
/* make sure we have no running tx while configuring the qos */
set_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
synchronize_net();
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_enter_d0i3_iterator,
&d0i3_iter_data);
if (d0i3_iter_data.vif_count == 1) {
mvm->d0i3_ap_sta_id = d0i3_iter_data.ap_sta_id;
mvm->d0i3_offloading = !d0i3_iter_data.disable_offloading;
} else {
WARN_ON_ONCE(d0i3_iter_data.vif_count > 1);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
mvm->d0i3_offloading = false;
}
iwl_mvm_set_wowlan_data(mvm, &wowlan_config_cmd, &d0i3_iter_data);
ret = iwl_mvm_send_cmd_pdu(mvm, WOWLAN_CONFIGURATION, flags,
sizeof(wowlan_config_cmd),
&wowlan_config_cmd);
......@@ -914,6 +1065,62 @@ static void iwl_mvm_d0i3_disconnect_iter(void *data, u8 *mac,
ieee80211_connection_loss(vif);
}
void iwl_mvm_d0i3_enable_tx(struct iwl_mvm *mvm, __le16 *qos_seq)
{
struct ieee80211_sta *sta = NULL;
struct iwl_mvm_sta *mvm_ap_sta;
int i;
bool wake_queues = false;
lockdep_assert_held(&mvm->mutex);
spin_lock_bh(&mvm->d0i3_tx_lock);
if (mvm->d0i3_ap_sta_id == IWL_MVM_STATION_COUNT)
goto out;
IWL_DEBUG_RPM(mvm, "re-enqueue packets\n");
/* get the sta in order to update seq numbers and re-enqueue skbs */
sta = rcu_dereference_protected(
mvm->fw_id_to_mac_id[mvm->d0i3_ap_sta_id],
lockdep_is_held(&mvm->mutex));
if (IS_ERR_OR_NULL(sta)) {
sta = NULL;
goto out;
}
if (mvm->d0i3_offloading && qos_seq) {
/* update qos seq numbers if offloading was enabled */
mvm_ap_sta = (struct iwl_mvm_sta *)sta->drv_priv;
for (i = 0; i < IWL_MAX_TID_COUNT; i++) {
u16 seq = le16_to_cpu(qos_seq[i]);
/* firmware stores last-used one, we store next one */
seq += 0x10;
mvm_ap_sta->tid_data[i].seq_number = seq;
}
}
out:
/* re-enqueue (or drop) all packets */
while (!skb_queue_empty(&mvm->d0i3_tx)) {
struct sk_buff *skb = __skb_dequeue(&mvm->d0i3_tx);
if (!sta || iwl_mvm_tx_skb(mvm, skb, sta))
ieee80211_free_txskb(mvm->hw, skb);
/* if the skb_queue is not empty, we need to wake queues */
wake_queues = true;
}
clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);
wake_up(&mvm->d0i3_exit_waitq);
mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT;
if (wake_queues)
ieee80211_wake_queues(mvm->hw);
spin_unlock_bh(&mvm->d0i3_tx_lock);
}
static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
{
struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, d0i3_exit_work);
......@@ -924,6 +1131,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
struct iwl_wowlan_status_v6 *status;
int ret;
u32 disconnection_reasons, wakeup_reasons;
__le16 *qos_seq = NULL;
mutex_lock(&mvm->mutex);
ret = iwl_mvm_send_cmd(mvm, &get_status_cmd);
......@@ -935,6 +1143,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
status = (void *)get_status_cmd.resp_pkt->data;
wakeup_reasons = le32_to_cpu(status->wakeup_reasons);
qos_seq = status->qos_seq_ctr;
IWL_DEBUG_RPM(mvm, "wakeup reasons: 0x%x\n", wakeup_reasons);
......@@ -948,6 +1157,7 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
iwl_free_resp(&get_status_cmd);
out:
iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
mutex_unlock(&mvm->mutex);
}
......
......@@ -511,6 +511,7 @@ int iwl_mvm_power_uapsd_misbehaving_ap_notif(struct iwl_mvm *mvm,
struct iwl_power_constraint {
struct ieee80211_vif *bf_vif;
struct ieee80211_vif *bss_vif;
struct ieee80211_vif *p2p_vif;
u16 bss_phyctx_id;
u16 p2p_phyctx_id;
bool pm_disabled;
......@@ -546,6 +547,10 @@ static void iwl_mvm_power_iterator(void *_data, u8 *mac,
if (mvmvif->phy_ctxt)
power_iterator->p2p_phyctx_id = mvmvif->phy_ctxt->id;
/* we should have only one P2P vif */
WARN_ON(power_iterator->p2p_vif);
power_iterator->p2p_vif = vif;
IWL_DEBUG_POWER(mvm, "p2p: p2p_id=%d, bss_id=%d\n",
power_iterator->p2p_phyctx_id,
power_iterator->bss_phyctx_id);
......@@ -633,16 +638,18 @@ int iwl_mvm_power_update_mac(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
return ret;
}
ret = iwl_mvm_power_send_cmd(mvm, vif);
if (ret)
return ret;
if (constraint.bss_vif && vif != constraint.bss_vif) {
if (constraint.bss_vif) {
ret = iwl_mvm_power_send_cmd(mvm, constraint.bss_vif);
if (ret)
return ret;
}
if (constraint.p2p_vif) {
ret = iwl_mvm_power_send_cmd(mvm, constraint.p2p_vif);
if (ret)
return ret;
}
if (!constraint.bf_vif)
return 0;
......
......@@ -180,7 +180,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
.colors = { -1, -1, -1, -1 },
.new_vif = newvif,
};
u32 ll_max_duration;
lockdep_assert_held(&mvm->mutex);
......@@ -199,21 +198,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
iwl_mvm_quota_iterator(&data, newvif->addr, newvif);
}
switch (data.n_low_latency_bindings) {
case 0: /* no low latency - use default */
ll_max_duration = 0;
break;
case 1: /* SingleBindingLowLatencyMode */
ll_max_duration = IWL_MVM_LOWLAT_SINGLE_BINDING_MAXDUR;
break;
case 2: /* DualBindingLowLatencyMode */
ll_max_duration = IWL_MVM_LOWLAT_DUAL_BINDING_MAXDUR;
break;
default: /* MultiBindingLowLatencyMode */
ll_max_duration = 0;
break;
}
/*
* The FW's scheduling session consists of
* IWL_MVM_MAX_QUOTA fragments. Divide these fragments
......@@ -278,7 +262,6 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
* binding.
*/
cmd.quotas[idx].quota = cpu_to_le32(QUOTA_LOWLAT_MIN);
else
cmd.quotas[idx].quota =
cpu_to_le32(quota * data.n_interfaces[i]);
......@@ -287,11 +270,7 @@ int iwl_mvm_update_quotas(struct iwl_mvm *mvm, struct ieee80211_vif *newvif)
"Binding=%d, quota=%u > max=%u\n",
idx, le32_to_cpu(cmd.quotas[idx].quota), QUOTA_100);
if (data.n_interfaces[i] && !data.low_latency[i])
cmd.quotas[idx].max_duration =
cpu_to_le32(ll_max_duration);
else
cmd.quotas[idx].max_duration = cpu_to_le32(0);
cmd.quotas[idx].max_duration = cpu_to_le32(0);
idx++;
}
......
......@@ -211,9 +211,9 @@ static const struct rs_tx_column rs_tx_columns[] = {
.next_columns = {
RS_COLUMN_LEGACY_ANT_B,
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_MIMO2_SGI,
},
},
[RS_COLUMN_LEGACY_ANT_B] = {
......@@ -221,10 +221,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_B,
.next_columns = {
RS_COLUMN_LEGACY_ANT_A,
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_MIMO2_SGI,
},
},
[RS_COLUMN_SISO_ANT_A] = {
......@@ -234,8 +234,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
},
.checks = {
rs_siso_allow,
......@@ -248,8 +248,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_MIMO2,
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_MIMO2_SGI,
},
.checks = {
rs_siso_allow,
......@@ -263,8 +263,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
},
.checks = {
rs_siso_allow,
......@@ -279,8 +279,8 @@ static const struct rs_tx_column rs_tx_columns[] = {
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_MIMO2,
},
.checks = {
rs_siso_allow,
......@@ -292,10 +292,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.ant = ANT_AB,
.next_columns = {
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_MIMO2_SGI,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
......@@ -307,10 +307,10 @@ static const struct rs_tx_column rs_tx_columns[] = {
.sgi = true,
.next_columns = {
RS_COLUMN_SISO_ANT_A_SGI,
RS_COLUMN_SISO_ANT_B_SGI,
RS_COLUMN_SISO_ANT_A,
RS_COLUMN_SISO_ANT_B,
RS_COLUMN_MIMO2,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
RS_COLUMN_INVALID,
},
.checks = {
rs_mimo_allow,
......@@ -503,6 +503,14 @@ static void rs_rate_scale_clear_window(struct iwl_rate_scale_data *window)
window->average_tpt = IWL_INVALID_VALUE;
}
static void rs_rate_scale_clear_tbl_windows(struct iwl_scale_tbl_info *tbl)
{
int i;
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&tbl->win[i]);
}
static inline u8 rs_is_valid_ant(u8 valid_antenna, u8 ant_type)
{
return (ant_type & valid_antenna) == ant_type;
......@@ -566,19 +574,13 @@ static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
* at this rate. window->data contains the bitmask of successful
* packets.
*/
static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes)
static int _rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes,
struct iwl_rate_scale_data *window)
{
struct iwl_rate_scale_data *window = NULL;
static const u64 mask = (((u64)1) << (IWL_RATE_MAX_WINDOW - 1));
s32 fail_count, tpt;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);
/* Get expected throughput */
tpt = get_expected_tpt(tbl, scale_index);
......@@ -636,6 +638,21 @@ static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
return 0;
}
static int rs_collect_tx_data(struct iwl_scale_tbl_info *tbl,
int scale_index, int attempts, int successes)
{
struct iwl_rate_scale_data *window = NULL;
if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
return -EINVAL;
/* Select window for current tx bit rate */
window = &(tbl->win[scale_index]);
return _rs_collect_tx_data(tbl, scale_index, attempts, successes,
window);
}
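For readers unfamiliar with the window being refactored here: a standalone sketch of the sliding success-history bitmask that _rs_collect_tx_data() maintains per rate - the oldest result falls off the top as each new attempt shifts in at bit 0. The 62-entry width mirrors IWL_RATE_MAX_WINDOW; the traffic pattern is made up.
#include <stdint.h>
#include <stdio.h>
#define MAX_WINDOW 62	/* mirrors IWL_RATE_MAX_WINDOW */
struct win { uint64_t data; int count, success; };
static void collect(struct win *w, int success)
{
	const uint64_t top = 1ULL << (MAX_WINDOW - 1);
	if (w->count >= MAX_WINDOW) {	/* full: retire the oldest bit */
		if (w->data & top)
			w->success--;
		w->count--;
	}
	w->data = (w->data << 1) | (success ? 1 : 0);
	w->data &= (top << 1) - 1;	/* keep exactly MAX_WINDOW bits */
	w->count++;
	if (success)
		w->success++;
}
int main(void)
{
	struct win w = { 0, 0, 0 };
	int i;
	for (i = 0; i < 100; i++)
		collect(&w, i % 4 != 0);	/* ~75% success pattern */
	printf("window: %d/%d successes\n", w.success, w.count);
	return 0;
}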
/* Convert rs_rate object into ucode rate bitmask */
static u32 ucode_rate_from_rs_rate(struct iwl_mvm *mvm,
struct rs_rate *rate)
......@@ -1361,7 +1378,6 @@ static u32 rs_bw_from_sta_bw(struct ieee80211_sta *sta)
static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
{
struct iwl_scale_tbl_info *tbl;
int i;
int active_tbl;
int flush_interval_passed = 0;
struct iwl_mvm *mvm;
......@@ -1422,9 +1438,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
IWL_DEBUG_RATE(mvm,
"LQ: stay in table clear win\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(
&(tbl->win[i]));
rs_rate_scale_clear_tbl_windows(tbl);
}
}
......@@ -1433,8 +1447,7 @@ static void rs_stay_in_table(struct iwl_lq_sta *lq_sta, bool force_search)
* "search" table). */
if (lq_sta->rs_state == RS_STATE_SEARCH_CYCLE_STARTED) {
IWL_DEBUG_RATE(mvm, "Clearing up window stats\n");
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&(tbl->win[i]));
rs_rate_scale_clear_tbl_windows(tbl);
}
}
}
......@@ -1724,7 +1737,6 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
int low = IWL_RATE_INVALID;
int high = IWL_RATE_INVALID;
int index;
int i;
struct iwl_rate_scale_data *window = NULL;
int current_tpt = IWL_INVALID_VALUE;
int low_tpt = IWL_INVALID_VALUE;
......@@ -2009,8 +2021,7 @@ static void rs_rate_scale_perform(struct iwl_mvm *mvm,
if (lq_sta->search_better_tbl) {
/* Access the "search" table, clear its history. */
tbl = &(lq_sta->lq_info[(1 - lq_sta->active_tbl)]);
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&(tbl->win[i]));
rs_rate_scale_clear_tbl_windows(tbl);
/* Use new "search" start rate */
index = tbl->rate.index;
......@@ -2331,8 +2342,7 @@ void iwl_mvm_rs_rate_init(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
lq_sta->lq.sta_id = sta_priv->sta_id;
for (j = 0; j < LQ_SIZE; j++)
for (i = 0; i < IWL_RATE_COUNT; i++)
rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
rs_rate_scale_clear_tbl_windows(&lq_sta->lq_info[j]);
lq_sta->flush_timer = 0;
......@@ -2591,7 +2601,7 @@ static void rs_fill_lq_cmd(struct iwl_mvm *mvm,
if (sta)
lq_cmd->agg_time_limit =
cpu_to_le16(iwl_mvm_bt_coex_agg_time_limit(mvm, sta));
cpu_to_le16(iwl_mvm_coex_agg_time_limit(mvm, sta));
}
static void *rs_alloc(struct ieee80211_hw *hw, struct dentry *debugfsdir)
......
......@@ -70,9 +70,16 @@
#define IWL_PLCP_QUIET_THRESH 1
#define IWL_ACTIVE_QUIET_TIME 10
#define LONG_OUT_TIME_PERIOD 600
#define SHORT_OUT_TIME_PERIOD 200
#define SUSPEND_TIME_PERIOD 100
struct iwl_mvm_scan_params {
u32 max_out_time;
u32 suspend_time;
bool passive_fragmented;
struct _dwell {
u16 passive;
u16 active;
} dwell[IEEE80211_NUM_BANDS];
};
static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
{
......@@ -90,24 +97,6 @@ static inline __le16 iwl_mvm_scan_rx_chain(struct iwl_mvm *mvm)
return cpu_to_le16(rx_chain);
}
static inline __le32 iwl_mvm_scan_max_out_time(struct ieee80211_vif *vif,
u32 flags, bool is_assoc)
{
if (!is_assoc)
return 0;
if (flags & NL80211_SCAN_FLAG_LOW_PRIORITY)
return cpu_to_le32(ieee80211_tu_to_usec(SHORT_OUT_TIME_PERIOD));
return cpu_to_le32(ieee80211_tu_to_usec(LONG_OUT_TIME_PERIOD));
}
static inline __le32 iwl_mvm_scan_suspend_time(struct ieee80211_vif *vif,
bool is_assoc)
{
if (!is_assoc)
return 0;
return cpu_to_le32(ieee80211_tu_to_usec(SUSPEND_TIME_PERIOD));
}
static inline __le32
iwl_mvm_scan_rxon_flags(struct cfg80211_scan_request *req)
{
......@@ -181,15 +170,14 @@ static u16 iwl_mvm_get_passive_dwell(enum ieee80211_band band)
static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
struct cfg80211_scan_request *req,
bool basic_ssid)
bool basic_ssid,
struct iwl_mvm_scan_params *params)
{
u16 passive_dwell = iwl_mvm_get_passive_dwell(req->channels[0]->band);
u16 active_dwell = iwl_mvm_get_active_dwell(req->channels[0]->band,
req->n_ssids);
struct iwl_scan_channel *chan = (struct iwl_scan_channel *)
(cmd->data + le16_to_cpu(cmd->tx_cmd.len));
int i;
int type = BIT(req->n_ssids) - 1;
enum ieee80211_band band = req->channels[0]->band;
if (!basic_ssid)
type |= BIT(req->n_ssids);
......@@ -199,8 +187,8 @@ static void iwl_mvm_scan_fill_channels(struct iwl_scan_cmd *cmd,
chan->type = cpu_to_le32(type);
if (req->channels[i]->flags & IEEE80211_CHAN_NO_IR)
chan->type &= cpu_to_le32(~SCAN_CHANNEL_TYPE_ACTIVE);
chan->active_dwell = cpu_to_le16(active_dwell);
chan->passive_dwell = cpu_to_le16(passive_dwell);
chan->active_dwell = cpu_to_le16(params->dwell[band].active);
chan->passive_dwell = cpu_to_le16(params->dwell[band].passive);
chan->iteration_count = cpu_to_le16(1);
chan++;
}
......@@ -267,13 +255,76 @@ static u16 iwl_mvm_fill_probe_req(struct ieee80211_mgmt *frame, const u8 *ta,
return (u16)len;
}
static void iwl_mvm_vif_assoc_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
static void iwl_mvm_scan_condition_iterator(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
bool *is_assoc = data;
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
bool *global_bound = data;
if (vif->bss_conf.assoc)
*is_assoc = true;
if (mvmvif->phy_ctxt && mvmvif->phy_ctxt->id < MAX_PHYS)
*global_bound = true;
}
static void iwl_mvm_scan_calc_params(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
int n_ssids,
struct iwl_mvm_scan_params *params)
{
bool global_bound = false;
enum ieee80211_band band;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_scan_condition_iterator,
&global_bound);
/*
* Under low latency traffic the passive scan is fragmented, meaning
* that the dwell on a particular channel is split into fragments. Each
* fragment dwells for 20ms and the fragment period is 105ms. Skipping
* to the next channel is delayed by the same 105ms period. So the
* suspend_time parameter, which describes both the fragment and the
* channel-skipping periods, is set to 105ms. This value is chosen so
* that the overall passive scan duration is not too long. max_out_time
* in this case is set to 70ms, so for active scanning the operating
* channel is left for 70ms, while for passive it is still left for
* only 20ms (the fragment dwell).
*/
if (global_bound) {
if (!iwl_mvm_low_latency(mvm)) {
params->suspend_time = ieee80211_tu_to_usec(100);
params->max_out_time = ieee80211_tu_to_usec(600);
} else {
params->suspend_time = ieee80211_tu_to_usec(105);
/* P2P doesn't support fragmented passive scan, so
* configure max_out_time to be at least the longest dwell
* time for passive scan.
*/
if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
params->max_out_time = ieee80211_tu_to_usec(70);
params->passive_fragmented = true;
} else {
u32 passive_dwell;
/*
* Use the 2.4 GHz band so that the passive channel dwell
* time is assigned its maximum value.
*/
band = IEEE80211_BAND_2GHZ;
passive_dwell = iwl_mvm_get_passive_dwell(band);
params->max_out_time =
ieee80211_tu_to_usec(passive_dwell);
}
}
}
for (band = IEEE80211_BAND_2GHZ; band < IEEE80211_NUM_BANDS; band++) {
if (params->passive_fragmented)
params->dwell[band].passive = 20;
else
params->dwell[band].passive =
iwl_mvm_get_passive_dwell(band);
params->dwell[band].active = iwl_mvm_get_active_dwell(band,
n_ssids);
}
}
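A quick standalone check of the timings chosen above: one beacon time unit (TU) is 1024 usec, which is all ieee80211_tu_to_usec() does, so the fragmented-scan values land close to the 20ms/105ms figures quoted in the comment.
#include <stdio.h>
static unsigned tu_to_usec(unsigned tu)
{
	return tu * 1024;	/* 1 TU = 1024 usec */
}
int main(void)
{
	/* low-latency fragmented passive scan parameters */
	printf("suspend_time: %u usec\n", tu_to_usec(105));	/* 107520 */
	printf("max_out_time: %u usec\n", tu_to_usec(70));	/* 71680 */
	/* default parameters when a binding exists without low latency */
	printf("default: suspend %u usec, out %u usec\n",
	       tu_to_usec(100), tu_to_usec(600));
	return 0;
}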
int iwl_mvm_scan_request(struct iwl_mvm *mvm,
......@@ -288,13 +339,13 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
.dataflags = { IWL_HCMD_DFL_NOCOPY, },
};
struct iwl_scan_cmd *cmd = mvm->scan_cmd;
bool is_assoc = false;
int ret;
u32 status;
int ssid_len = 0;
u8 *ssid = NULL;
bool basic_ssid = !(mvm->fw->ucode_capa.flags &
IWL_UCODE_TLV_FLAGS_NO_BASIC_SSID);
struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
BUG_ON(mvm->scan_cmd == NULL);
......@@ -304,17 +355,18 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
memset(cmd, 0, sizeof(struct iwl_scan_cmd) +
mvm->fw->ucode_capa.max_probe_length +
(MAX_NUM_SCAN_CHANNELS * sizeof(struct iwl_scan_channel)));
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_vif_assoc_iterator,
&is_assoc);
cmd->channel_count = (u8)req->n_channels;
cmd->quiet_time = cpu_to_le16(IWL_ACTIVE_QUIET_TIME);
cmd->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
cmd->rxchain_sel_flags = iwl_mvm_scan_rx_chain(mvm);
cmd->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
is_assoc);
cmd->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
cmd->max_out_time = cpu_to_le32(params.max_out_time);
cmd->suspend_time = cpu_to_le32(params.suspend_time);
if (params.passive_fragmented)
cmd->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
cmd->rxon_flags = iwl_mvm_scan_rxon_flags(req);
cmd->filter_flags = cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
......@@ -360,7 +412,7 @@ int iwl_mvm_scan_request(struct iwl_mvm *mvm,
req->ie, req->ie_len,
mvm->fw->ucode_capa.max_probe_length));
iwl_mvm_scan_fill_channels(cmd, req, basic_ssid);
iwl_mvm_scan_fill_channels(cmd, req, basic_ssid, &params);
cmd->len = cpu_to_le16(sizeof(struct iwl_scan_cmd) +
le16_to_cpu(cmd->tx_cmd.len) +
......@@ -402,10 +454,13 @@ int iwl_mvm_rx_scan_complete(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb,
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_scan_complete_notif *notif = (void *)pkt->data;
lockdep_assert_held(&mvm->mutex);
IWL_DEBUG_SCAN(mvm, "Scan complete: status=0x%x scanned channels=%d\n",
notif->status, notif->scanned_channels);
mvm->scan_status = IWL_MVM_SCAN_NONE;
if (mvm->scan_status == IWL_MVM_SCAN_OS)
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_scan_completed(mvm->hw, notif->status != SCAN_COMP_STATUS_OK);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
......@@ -466,7 +521,7 @@ static bool iwl_mvm_scan_abort_notif(struct iwl_notif_wait_data *notif_wait,
};
}
void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
int iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
{
struct iwl_notification_wait wait_scan_abort;
static const u8 scan_abort_notif[] = { SCAN_ABORT_CMD,
......@@ -474,13 +529,13 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
int ret;
if (mvm->scan_status == IWL_MVM_SCAN_NONE)
return;
return 0;
if (iwl_mvm_is_radio_killed(mvm)) {
ieee80211_scan_completed(mvm->hw, true);
iwl_mvm_unref(mvm, IWL_MVM_REF_SCAN);
mvm->scan_status = IWL_MVM_SCAN_NONE;
return;
return 0;
}
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_abort,
......@@ -495,14 +550,11 @@ void iwl_mvm_cancel_scan(struct iwl_mvm *mvm)
goto out_remove_notif;
}
ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, 1 * HZ);
if (ret)
IWL_ERR(mvm, "%s - failed on timeout\n", __func__);
return;
return iwl_wait_notification(&mvm->notif_wait, &wait_scan_abort, HZ);
out_remove_notif:
iwl_remove_notification(&mvm->notif_wait, &wait_scan_abort);
return ret;
}
int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
......@@ -519,10 +571,11 @@ int iwl_mvm_rx_scan_offload_complete_notif(struct iwl_mvm *mvm,
scan_notif->status == IWL_SCAN_OFFLOAD_COMPLETED ?
"completed" : "aborted");
/* might already be something else again, don't reset if so */
if (mvm->scan_status == IWL_MVM_SCAN_SCHED)
/* only call mac80211 completion if the stop was initiated by FW */
if (mvm->scan_status == IWL_MVM_SCAN_SCHED) {
mvm->scan_status = IWL_MVM_SCAN_NONE;
ieee80211_sched_scan_stopped(mvm->hw);
ieee80211_sched_scan_stopped(mvm->hw);
}
return 0;
}
......@@ -553,14 +606,9 @@ static void iwl_scan_offload_build_tx_cmd(struct iwl_mvm *mvm,
static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
struct ieee80211_vif *vif,
struct cfg80211_sched_scan_request *req,
struct iwl_scan_offload_cmd *scan)
struct iwl_scan_offload_cmd *scan,
struct iwl_mvm_scan_params *params)
{
bool is_assoc = false;
ieee80211_iterate_active_interfaces_atomic(mvm->hw,
IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_vif_assoc_iterator,
&is_assoc);
scan->channel_count =
mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels +
mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
......@@ -568,13 +616,17 @@ static void iwl_build_scan_cmd(struct iwl_mvm *mvm,
scan->quiet_plcp_th = cpu_to_le16(IWL_PLCP_QUIET_THRESH);
scan->good_CRC_th = IWL_GOOD_CRC_TH_DEFAULT;
scan->rx_chain = iwl_mvm_scan_rx_chain(mvm);
scan->max_out_time = iwl_mvm_scan_max_out_time(vif, req->flags,
is_assoc);
scan->suspend_time = iwl_mvm_scan_suspend_time(vif, is_assoc);
scan->max_out_time = cpu_to_le32(params->max_out_time);
scan->suspend_time = cpu_to_le32(params->suspend_time);
scan->filter_flags |= cpu_to_le32(MAC_FILTER_ACCEPT_GRP |
MAC_FILTER_IN_BEACON);
scan->scan_type = cpu_to_le32(SCAN_TYPE_BACKGROUND);
scan->rep_count = cpu_to_le32(1);
if (params->passive_fragmented)
scan->scan_flags |= SCAN_FLAGS_FRAGMENTED_SCAN;
}
static int iwl_ssid_exist(u8 *ssid, u8 ssid_len, struct iwl_ssid_ie *ssid_list)
......@@ -639,12 +691,11 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
struct iwl_scan_channel_cfg *channels,
enum ieee80211_band band,
int *head, int *tail,
u32 ssid_bitmap)
u32 ssid_bitmap,
struct iwl_mvm_scan_params *params)
{
struct ieee80211_supported_band *s_band;
int n_probes = req->n_ssids;
int n_channels = req->n_channels;
u8 active_dwell, passive_dwell;
int i, j, index = 0;
bool partial;
......@@ -654,8 +705,6 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
* to scan. So add requested channels to head of the list and others to
* the end.
*/
active_dwell = iwl_mvm_get_active_dwell(band, n_probes);
passive_dwell = iwl_mvm_get_passive_dwell(band);
s_band = &mvm->nvm_data->bands[band];
for (i = 0; i < s_band->n_channels && *head <= *tail; i++) {
......@@ -679,8 +728,8 @@ static void iwl_build_channel_cfg(struct iwl_mvm *mvm,
channels->channel_number[index] =
cpu_to_le16(ieee80211_frequency_to_channel(
s_band->channels[i].center_freq));
channels->dwell_time[index][0] = active_dwell;
channels->dwell_time[index][1] = passive_dwell;
channels->dwell_time[index][0] = params->dwell[band].active;
channels->dwell_time[index][1] = params->dwell[band].passive;
channels->iter_count[index] = cpu_to_le16(1);
channels->iter_interval[index] = 0;
......@@ -709,7 +758,6 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
struct cfg80211_sched_scan_request *req,
struct ieee80211_sched_scan_ies *ies)
{
int supported_bands = 0;
int band_2ghz = mvm->nvm_data->bands[IEEE80211_BAND_2GHZ].n_channels;
int band_5ghz = mvm->nvm_data->bands[IEEE80211_BAND_5GHZ].n_channels;
int head = 0;
......@@ -723,22 +771,19 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
.id = SCAN_OFFLOAD_CONFIG_CMD,
.flags = CMD_SYNC,
};
struct iwl_mvm_scan_params params = {};
lockdep_assert_held(&mvm->mutex);
if (band_2ghz)
supported_bands++;
if (band_5ghz)
supported_bands++;
cmd_len = sizeof(struct iwl_scan_offload_cfg) +
supported_bands * SCAN_OFFLOAD_PROBE_REQ_SIZE;
2 * SCAN_OFFLOAD_PROBE_REQ_SIZE;
scan_cfg = kzalloc(cmd_len, GFP_KERNEL);
if (!scan_cfg)
return -ENOMEM;
iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd);
iwl_mvm_scan_calc_params(mvm, vif, req->n_ssids, &params);
iwl_build_scan_cmd(mvm, vif, req, &scan_cfg->scan_cmd, &params);
scan_cfg->scan_cmd.len = cpu_to_le16(cmd_len);
iwl_scan_offload_build_ssid(req, &scan_cfg->scan_cmd, &ssid_bitmap);
......@@ -750,7 +795,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
scan_cfg->data);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
IEEE80211_BAND_2GHZ, &head, &tail,
ssid_bitmap);
ssid_bitmap, &params);
}
if (band_5ghz) {
iwl_scan_offload_build_tx_cmd(mvm, vif, ies,
......@@ -760,7 +805,7 @@ int iwl_mvm_config_sched_scan(struct iwl_mvm *mvm,
SCAN_OFFLOAD_PROBE_REQ_SIZE);
iwl_build_channel_cfg(mvm, req, &scan_cfg->channel_cfg,
IEEE80211_BAND_5GHZ, &head, &tail,
ssid_bitmap);
ssid_bitmap, &params);
}
cmd.data[0] = scan_cfg;
......@@ -900,26 +945,49 @@ static int iwl_mvm_send_sched_scan_abort(struct iwl_mvm *mvm)
* microcode has notified us that a scan is completed.
*/
IWL_DEBUG_SCAN(mvm, "SCAN OFFLOAD ABORT ret %d.\n", status);
ret = -EIO;
ret = -ENOENT;
}
return ret;
}
void iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
int iwl_mvm_sched_scan_stop(struct iwl_mvm *mvm)
{
int ret;
struct iwl_notification_wait wait_scan_done;
static const u8 scan_done_notif[] = { SCAN_OFFLOAD_COMPLETE, };
lockdep_assert_held(&mvm->mutex);
if (mvm->scan_status != IWL_MVM_SCAN_SCHED) {
IWL_DEBUG_SCAN(mvm, "No offloaded scan to stop\n");
return;
return 0;
}
iwl_init_notification_wait(&mvm->notif_wait, &wait_scan_done,
scan_done_notif,
ARRAY_SIZE(scan_done_notif),
NULL, NULL);
ret = iwl_mvm_send_sched_scan_abort(mvm);
if (ret)
if (ret) {
IWL_DEBUG_SCAN(mvm, "Send stop offload scan failed %d\n", ret);
else
IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
iwl_remove_notification(&mvm->notif_wait, &wait_scan_done);
return ret;
}
IWL_DEBUG_SCAN(mvm, "Successfully sent stop offload scan\n");
ret = iwl_wait_notification(&mvm->notif_wait, &wait_scan_done, 1 * HZ);
if (ret)
return ret;
/*
* Clear the scan status so that the next scan request will succeed. This
* also ensures the Rx handler doesn't do anything, as the scan was
* stopped from above.
*/
mvm->scan_status = IWL_MVM_SCAN_NONE;
return 0;
}
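The register-before-send pattern used above (init the notification wait, send the abort, then wait with a timeout, removing the waiter explicitly only on the send-failure path) in a standalone pthreads sketch - an analogy, not the driver's notif-wait implementation. Build with -pthread.
#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static int fired;
static void *firmware(void *arg)	/* stands in for the fw notification */
{
	usleep(100 * 1000);
	pthread_mutex_lock(&lock);
	fired = 1;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
	return NULL;
}
int main(void)
{
	pthread_t t;
	struct timespec to;
	int ret = 0;
	/* register interest *before* triggering the event, as above */
	pthread_create(&t, NULL, firmware, NULL);
	clock_gettime(CLOCK_REALTIME, &to);
	to.tv_sec += 1;			/* the "1 * HZ" timeout */
	pthread_mutex_lock(&lock);
	while (!fired && ret == 0)
		ret = pthread_cond_timedwait(&cond, &lock, &to);
	pthread_mutex_unlock(&lock);
	puts(ret == ETIMEDOUT ? "timed out" : "notification received");
	pthread_join(t, NULL);
	return 0;
}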
......@@ -851,7 +851,7 @@ static int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
return ret;
}
static const u8 tid_to_mac80211_ac[] = {
const u8 tid_to_mac80211_ac[] = {
IEEE80211_AC_BE,
IEEE80211_AC_BK,
IEEE80211_AC_BK,
......@@ -902,10 +902,18 @@ int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return -EIO;
}
spin_lock_bh(&mvmsta->lock);
/* possible race condition - we entered D0i3 while starting agg */
if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
spin_unlock_bh(&mvmsta->lock);
IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
return -EIO;
}
/* the new tx queue is still connected to the same mac80211 queue */
mvm->queue_to_mac80211[txq_id] = vif->hw_queue[tid_to_mac80211_ac[tid]];
spin_lock_bh(&mvmsta->lock);
tid_data = &mvmsta->tid_data[tid];
tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
tid_data->txq_id = txq_id;
......
......@@ -79,6 +79,7 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
__le16 fc = hdr->frame_control;
u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
u32 len = skb->len + FCS_LEN;
u8 ac;
if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
tx_flags |= TX_CMD_FLG_ACK;
......@@ -90,13 +91,6 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
else if (ieee80211_is_back_req(fc))
tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
/* High prio packet (wrt. BT coex) if it is EAPOL, MCAST or MGMT */
if (info->band == IEEE80211_BAND_2GHZ &&
(info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO ||
is_multicast_ether_addr(hdr->addr1) ||
ieee80211_is_back_req(fc) || ieee80211_is_mgmt(fc)))
tx_flags |= TX_CMD_FLG_BT_DIS;
if (ieee80211_has_morefrags(fc))
tx_flags |= TX_CMD_FLG_MORE_FRAG;
......@@ -112,6 +106,11 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
}
/* tid_tspec will default to 0 = BE when QOS isn't enabled */
ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
TX_CMD_FLG_BT_PRIO_POS;
if (ieee80211_is_mgmt(fc)) {
if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
tx_cmd->pm_frame_timeout = cpu_to_le16(3);
......@@ -128,9 +127,6 @@ static void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
tx_cmd->pm_frame_timeout = 0;
}
if (info->flags & IEEE80211_TX_CTL_AMPDU)
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
!is_multicast_ether_addr(ieee80211_get_DA(hdr)))
tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
......
......@@ -516,33 +516,26 @@ void iwl_mvm_dump_nic_error_log(struct iwl_mvm *mvm)
iwl_mvm_dump_umac_error_log(mvm);
}
void iwl_mvm_dump_sram(struct iwl_mvm *mvm)
void iwl_mvm_fw_error_sram_dump(struct iwl_mvm *mvm)
{
const struct fw_img *img;
int ofs, len = 0;
int i;
__le32 *buf;
u32 ofs, sram_len;
void *sram;
if (!mvm->ucode_loaded)
if (!mvm->ucode_loaded || mvm->fw_error_sram)
return;
img = &mvm->fw->img[mvm->cur_ucode];
ofs = img->sec[IWL_UCODE_SECTION_DATA].offset;
len = img->sec[IWL_UCODE_SECTION_DATA].len;
sram_len = img->sec[IWL_UCODE_SECTION_DATA].len;
buf = kzalloc(len, GFP_ATOMIC);
if (!buf)
sram = kzalloc(sram_len, GFP_ATOMIC);
if (!sram)
return;
iwl_trans_read_mem_bytes(mvm->trans, ofs, buf, len);
len = len >> 2;
for (i = 0; i < len; i++) {
IWL_ERR(mvm, "0x%08X\n", le32_to_cpu(buf[i]));
/* Add a small delay to let syslog catch up */
udelay(10);
}
kfree(buf);
iwl_trans_read_mem_bytes(mvm->trans, ofs, sram, sram_len);
mvm->fw_error_sram = sram;
mvm->fw_error_sram_len = sram_len;
}
/**
......@@ -619,6 +612,9 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
lockdep_assert_held(&mvm->mutex);
if (mvmvif->low_latency == value)
return 0;
mvmvif->low_latency = value;
res = iwl_mvm_update_quotas(mvm, NULL);
......@@ -629,3 +625,22 @@ int iwl_mvm_update_low_latency(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
return iwl_mvm_power_update_mac(mvm, vif);
}
static void iwl_mvm_ll_iter(void *_data, u8 *mac, struct ieee80211_vif *vif)
{
bool *result = _data;
if (iwl_mvm_vif_low_latency(iwl_mvm_vif_from_mac80211(vif)))
*result = true;
}
bool iwl_mvm_low_latency(struct iwl_mvm *mvm)
{
bool result = false;
ieee80211_iterate_active_interfaces_atomic(
mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
iwl_mvm_ll_iter, &result);
return result;
}
......@@ -447,7 +447,8 @@ static void set_dflt_pwr_limit(struct iwl_trans *trans, struct pci_dev *pdev)
pxsx_handle = ACPI_HANDLE(&pdev->dev);
if (!pxsx_handle) {
IWL_ERR(trans, "Could not retrieve root port ACPI handle");
IWL_DEBUG_INFO(trans,
"Could not retrieve root port ACPI handle");
return;
}
......@@ -559,7 +560,7 @@ static int iwl_pci_resume(struct device *device)
iwl_enable_rfkill_int(trans);
hw_rfkill = iwl_is_rfkill_set(trans);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
......
......@@ -488,4 +488,6 @@ static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
__iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state);
#endif /* __iwl_trans_int_pcie_h__ */
......@@ -994,7 +994,7 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
isr_stats->rfkill++;
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill) {
set_bit(STATUS_RFKILL, &trans->status);
if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
......
......@@ -75,6 +75,20 @@
#include "iwl-agn-hw.h"
#include "internal.h"
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
((reg & 0x0000ffff) | (2 << 28)));
return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}
static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
((reg & 0x0000ffff) | (3 << 28)));
}
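For clarity on the two helpers above: the control word packs the shared-block register offset into the low 16 bits and the operation code into bits 31:28 (2 for read, 3 for write). A standalone sketch with a made-up offset:
#include <stdint.h>
#include <stdio.h>
static uint32_t shr_ctrl_word(uint32_t reg, unsigned op)
{
	return (reg & 0x0000ffff) | (op << 28);
}
int main(void)
{
	uint32_t reg = 0x1c0;	/* hypothetical shared-block offset */
	printf("read ctrl word:  0x%08x\n", shr_ctrl_word(reg, 2));
	printf("write ctrl word: 0x%08x\n", shr_ctrl_word(reg, 3));
	return 0;
}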
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
......@@ -229,6 +243,116 @@ static int iwl_pcie_apm_init(struct iwl_trans *trans)
return ret;
}
/*
* Enable LP XTAL to avoid a HW bug where the device may consume too
* much power if the FW is not loaded after a device reset. LP XTAL is
* disabled by default after a device HW reset. Do this only if XTAL
* is fed by an internal source. Configure the device's "persistence"
* mode to avoid resetting XTAL again when SHRD_HW_RST occurs in S3.
*/
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
int ret;
u32 apmg_gp1_reg;
u32 apmg_xtal_cfg_reg;
u32 dl_cfg_reg;
/* Force XTAL ON */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
/*
* Set "initialization complete" bit to move adapter from
* D0U* --> D0A* (powered-up active) state.
*/
iwl_set_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/*
* Wait for clock stabilization; once stabilized, access to
* device-internal resources is possible.
*/
ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
25000);
if (WARN_ON(ret < 0)) {
IWL_ERR(trans, "Access time out - failed to enable LP XTAL\n");
/* Release XTAL ON request */
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
return;
}
/*
* Clear "disable persistence" to avoid LP XTAL resetting when
* SHRD_HW_RST is applied in S3.
*/
iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
APMG_PCIDEV_STT_VAL_PERSIST_DIS);
/*
* Force APMG XTAL to be active to prevent its disabling by HW
* caused by APMG idle state.
*/
apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
SHR_APMG_XTAL_CFG_REG);
iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
apmg_xtal_cfg_reg |
SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
/*
* Reset entire device again - do controller reset (results in
* SHRD_HW_RST). Turn MAC off before proceeding.
*/
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
udelay(10);
/* Enable LP XTAL by indirect access through CSR */
apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
SHR_APMG_GP1_WF_XTAL_LP_EN |
SHR_APMG_GP1_CHICKEN_BIT_SELECT);
/* Clear delay line clock power up */
dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);
/*
* Enable persistence mode to avoid LP XTAL resetting when
* SHRD_HW_RST is applied in S3.
*/
iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
CSR_HW_IF_CONFIG_REG_PERSIST_MODE);
/*
* Clear "initialization complete" bit to move adapter from
* D0A* (powered-up Active) --> D0U* (Uninitialized) state.
*/
iwl_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
/* Activates XTAL resources monitor */
__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
CSR_MONITOR_XTAL_RESOURCES);
/* Release XTAL ON request */
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
udelay(10);
/* Release APMG XTAL */
iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
apmg_xtal_cfg_reg &
~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}
static int iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
int ret = 0;
......@@ -256,6 +380,11 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
/* Stop device's DMA activity */
iwl_pcie_apm_stop_master(trans);
if (trans->cfg->lp_xtal_workaround) {
iwl_pcie_apm_lp_xtal_enable(trans);
return;
}
/* Reset the entire device */
iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
......@@ -641,7 +770,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
if (hw_rfkill && !run_in_rfkill)
return -ERFKILL;
......@@ -756,7 +885,13 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
else
clear_bit(STATUS_RFKILL, &trans->status);
if (hw_rfkill != was_hw_rfkill)
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}
void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
iwl_trans_pcie_stop_device(trans);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
......@@ -865,7 +1000,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
iwl_trans_pcie_rf_kill(trans, hw_rfkill);
return 0;
}
......@@ -1208,6 +1343,7 @@ static const char *get_csr_string(int cmd)
IWL_CMD(CSR_GIO_CHICKEN_BITS);
IWL_CMD(CSR_ANA_PLL_CFG);
IWL_CMD(CSR_HW_REV_WA_REG);
IWL_CMD(CSR_MONITOR_STATUS_REG);
IWL_CMD(CSR_DBG_HPET_MEM_REG);
default:
return "UNKNOWN";
......@@ -1240,6 +1376,7 @@ void iwl_pcie_dump_csr(struct iwl_trans *trans)
CSR_DRAM_INT_TBL_REG,
CSR_GIO_CHICKEN_BITS,
CSR_ANA_PLL_CFG,
CSR_MONITOR_STATUS_REG,
CSR_HW_REV_WA_REG,
CSR_DBG_HPET_MEM_REG
};
......