Commit 6833d070 authored by Kalle Valo

Merge tag 'iwlwifi-next-for-kalle-2016-09-15-2' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next

* work for new hardware support continues
* dynamic queue allocation stabilization
* improvements in the MSI-X code
* multiqueue support work continues
* new firmware version support
* general cleanups and improvements
parents fd9527f4 fd659f8e
@@ -46,15 +46,6 @@
*
******************************************************************************/
static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_priv *priv, enum iwl_ucode_type ucode_type)
{
if (ucode_type >= IWL_UCODE_TYPE_MAX)
return NULL;
return &priv->fw->img[ucode_type];
}
/*
* Calibration
*/
@@ -330,7 +321,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
enum iwl_ucode_type old_type;
static const u16 alive_cmd[] = { REPLY_ALIVE };
fw = iwl_get_ucode_image(priv, ucode_type);
fw = iwl_get_ucode_image(priv->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
@@ -73,8 +73,8 @@
/* Highest firmware API version supported */
#define IWL7260_UCODE_API_MAX 17
#define IWL7265_UCODE_API_MAX 17
#define IWL7265D_UCODE_API_MAX 24
#define IWL3168_UCODE_API_MAX 24
#define IWL7265D_UCODE_API_MAX 26
#define IWL3168_UCODE_API_MAX 26
/* Lowest firmware API version supported */
#define IWL7260_UCODE_API_MIN 16
@@ -70,8 +70,8 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL8000_UCODE_API_MAX 24
#define IWL8265_UCODE_API_MAX 24
#define IWL8000_UCODE_API_MAX 26
#define IWL8265_UCODE_API_MAX 26
/* Lowest firmware API version supported */
#define IWL8000_UCODE_API_MIN 16
@@ -212,6 +212,17 @@ const struct iwl_cfg iwl8265_2ac_cfg = {
.vht_mu_mimo_supported = true,
};
const struct iwl_cfg iwl8275_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 8275",
.fw_name_pre = IWL8265_FW_PRE,
IWL_DEVICE_8265,
.ht_params = &iwl8000_ht_params,
.nvm_ver = IWL8000_NVM_VERSION,
.nvm_calib_ver = IWL8000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.vht_mu_mimo_supported = true,
};
const struct iwl_cfg iwl4165_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 4165",
.fw_name_pre = IWL8000_FW_PRE,
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL9000_UCODE_API_MAX 24
#define IWL9000_UCODE_API_MAX 26
/* Lowest firmware API version supported */
#define IWL9000_UCODE_API_MIN 16
@@ -187,6 +187,17 @@ const struct iwl_cfg iwl9460_2ac_cfg = {
.integrated = true,
};
const struct iwl_cfg iwl9560_2ac_cfg = {
.name = "Intel(R) Dual Band Wireless AC 9560",
.fw_name_pre = IWL9000_FW_PRE,
IWL_DEVICE_9000,
.ht_params = &iwl9000_ht_params,
.nvm_ver = IWL9000_NVM_VERSION,
.nvm_calib_ver = IWL9000_TX_POWER_VERSION,
.max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K,
.integrated = true,
};
/*
* TODO: the struct below is for internal testing only and should be
* removed by the end of 2016
@@ -55,7 +55,7 @@
#include "iwl-agn-hw.h"
/* Highest firmware API version supported */
#define IWL_A000_UCODE_API_MAX 24
#define IWL_A000_UCODE_API_MAX 26
/* Lowest firmware API version supported */
#define IWL_A000_UCODE_API_MIN 24
@@ -445,6 +445,7 @@ extern const struct iwl_cfg iwl7265d_n_cfg;
extern const struct iwl_cfg iwl8260_2n_cfg;
extern const struct iwl_cfg iwl8260_2ac_cfg;
extern const struct iwl_cfg iwl8265_2ac_cfg;
extern const struct iwl_cfg iwl8275_2ac_cfg;
extern const struct iwl_cfg iwl4165_2ac_cfg;
extern const struct iwl_cfg iwl8260_2ac_sdio_cfg;
extern const struct iwl_cfg iwl8265_2ac_sdio_cfg;
@@ -454,6 +455,7 @@ extern const struct iwl_cfg iwl9160_2ac_cfg;
extern const struct iwl_cfg iwl9260_2ac_cfg;
extern const struct iwl_cfg iwl9270_2ac_cfg;
extern const struct iwl_cfg iwl9460_2ac_cfg;
extern const struct iwl_cfg iwl9560_2ac_cfg;
extern const struct iwl_cfg iwla000_2ac_cfg;
#endif /* CONFIG_IWLMVM */
@@ -589,6 +589,8 @@ enum dtd_diode_reg {
* Causes for the FH register interrupts
*/
enum msix_fh_int_causes {
MSIX_FH_INT_CAUSES_Q0 = BIT(0),
MSIX_FH_INT_CAUSES_Q1 = BIT(1),
MSIX_FH_INT_CAUSES_D2S_CH0_NUM = BIT(16),
MSIX_FH_INT_CAUSES_D2S_CH1_NUM = BIT(17),
MSIX_FH_INT_CAUSES_S2D = BIT(19),
@@ -643,6 +643,7 @@ struct iwl_rb_status {
#define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP)
#define IWL_TX_DMA_MASK DMA_BIT_MASK(36)
#define IWL_NUM_OF_TBS 20
#define IWL_TFH_NUM_TBS 25
static inline u8 iwl_get_dma_hi_addr(dma_addr_t addr)
{
@@ -664,25 +665,29 @@ struct iwl_tfd_tb {
} __packed;
/**
* struct iwl_tfd
* struct iwl_tfh_tb transmit buffer descriptor within transmit frame descriptor
*
* Transmit Frame Descriptor (TFD)
*
* @ __reserved1[3] reserved
* @ num_tbs 0-4 number of active tbs
* 5 reserved
* 6-7 padding (not used)
* @ tbs[20] transmit frame buffer descriptors
* @ __pad padding
* This structure contains the DMA address and length of a transmit buffer
*
* @tb_len length of the tx buffer
* @addr 64-bit DMA address
*/
struct iwl_tfh_tb {
__le16 tb_len;
__le64 addr;
} __packed;
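For orientation, a minimal sketch of filling one such entry; iwl_tfh_tb_set() is a hypothetical helper added here for illustration, not something this patch introduces:

static inline void iwl_tfh_tb_set(struct iwl_tfh_tb *tb,
                                  dma_addr_t addr, u16 len)
{
    /* both fields are little-endian on the device side */
    tb->addr = cpu_to_le64(addr);
    tb->tb_len = cpu_to_le16(len);
}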
/**
* Each Tx queue uses a circular buffer of 256 TFDs stored in host DRAM.
* Both driver and device share these circular buffers, each of which must be
* contiguous 256 TFDs x 128 bytes-per-TFD = 32 KBytes
* contiguous 256 TFDs.
* For pre-a000 HW it is 256 x 128 bytes-per-TFD = 32 KBytes
* For a000 HW and on it is 256 x 256 bytes-per-TFD = 64 KBytes
*
* Driver must indicate the physical address of the base of each
* circular buffer via the FH_MEM_CBBC_QUEUE registers.
*
* Each TFD contains pointer/size information for up to 20 data buffers
* Each TFD contains pointer/size information for up to 20 / 25 data buffers
* in host DRAM. These buffers collectively contain the (one) frame described
* by the TFD. Each buffer must be a single contiguous block of memory within
* itself, but buffers may be scattered in host DRAM. Each buffer has max size
@@ -691,6 +696,16 @@ struct iwl_tfd_tb {
*
* A maximum of 255 (not 256!) TFDs may be on a queue waiting for Tx.
*/
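As a quick check of the size arithmetic above (a standalone sketch, not driver code; TFD_QUEUE_SIZE_MAX is the 256-descriptor ring size defined earlier in this header):

static inline unsigned int iwl_tfd_ring_bytes(unsigned int tfd_size)
{
    /* 256 * 128 = 32768 bytes (32 KiB); 256 * 256 = 65536 bytes (64 KiB) */
    return TFD_QUEUE_SIZE_MAX * tfd_size;
}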
/**
* struct iwl_tfd - Transmit Frame Descriptor (TFD)
* @ __reserved1[3] reserved
* @ num_tbs 0-4 number of active tbs
* 5 reserved
* 6-7 padding (not used)
* @ tbs[20] transmit frame buffer descriptors
* @ __pad padding
*/
struct iwl_tfd {
u8 __reserved1[3];
u8 num_tbs;
@@ -698,6 +713,19 @@ struct iwl_tfd {
__le32 __pad;
} __packed;
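A hedged helper sketch for the num_tbs layout documented above; the driver has its own accessor, this illustrative one just masks the active-count bits:

static inline u8 iwl_tfd_get_num_tbs_sketch(const struct iwl_tfd *tfd)
{
    /* bits 0-4: active TB count; bit 5: reserved; bits 6-7: padding */
    return tfd->num_tbs & 0x1f;
}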
/**
* struct iwl_tfh_tfd - Transmit Frame Descriptor (TFD)
* @ num_tbs 0-4 number of active tbs
* 5-15 reserved
* @ tbs[25] transmit frame buffer descriptors
* @ __pad padding
*/
struct iwl_tfh_tfd {
__le16 num_tbs;
struct iwl_tfh_tb tbs[IWL_TFH_NUM_TBS];
__le32 __pad;
} __packed;
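Assuming the __packed layouts above, the a000-style descriptor works out to 2 (num_tbs) + 25 * 10 (tbs) + 4 (__pad) = 256 bytes, matching the 256 bytes-per-TFD figure in the queue comment; a compile-time assertion along these lines could verify it (illustrative only, not part of the patch):

static inline void iwl_tfh_tfd_size_check(void)
{
    BUILD_BUG_ON(sizeof(struct iwl_tfh_tb) != 10);
    BUILD_BUG_ON(sizeof(struct iwl_tfh_tfd) != 256);
}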
/* Keep Warm Size */
#define IWL_KW_SIZE 0x1000 /* 4k */
@@ -706,8 +734,13 @@ struct iwl_tfd {
/**
* struct iwlagn_scd_bc_tbl scheduler byte count table
* base physical address provided by SCD_DRAM_BASE_ADDR
* For devices up to a000:
* @tfd_offset 0-12 - tx command byte count
* 12-16 - station index
* For a000 and on:
* @tfd_offset 0-12 - tx command byte count
* 12-13 - number of 64 byte chunks
* 14-16 - reserved
*/
struct iwlagn_scd_bc_tbl {
__le16 tfd_offset[TFD_QUEUE_BC_SIZE];
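A hedged sketch of packing one pre-a000 tfd_offset entry; the ranges in the comment above overlap at bit 12, so this assumes 12 bits of byte count and 4 bits of station index (hypothetical helper, not from the patch):

static inline __le16 iwl_scd_bc_entry_sketch(u16 byte_cnt, u8 sta_id)
{
    /* bits 0-11: tx command byte count; bits 12-15: station index */
    return cpu_to_le16((byte_cnt & 0xfff) | ((u16)sta_id << 12));
}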
@@ -329,4 +329,13 @@ iwl_fw_dbg_conf_usniffer(const struct iwl_fw *fw, u8 id)
return conf_tlv->usniffer;
}
static inline const struct fw_img *
iwl_get_ucode_image(const struct iwl_fw *fw, enum iwl_ucode_type ucode_type)
{
if (ucode_type >= IWL_UCODE_TYPE_MAX)
return NULL;
return &fw->img[ucode_type];
}
#endif /* __iwl_fw_h__ */
@@ -267,7 +267,7 @@ static const char *get_rfh_string(int cmd)
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_WIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_FRBDCB_RIDX, i);
IWL_CMD_MQ(cmd, RFH_Q_URBD_STTS_WPTR_LSB, i);
};
}
switch (cmd) {
IWL_CMD(RFH_RXF_DMA_CFG);
@@ -65,6 +65,7 @@
#include "iwl-trans.h"
#include "iwl-drv.h"
#include "iwl-fh.h"
struct iwl_trans *iwl_trans_alloc(unsigned int priv_size,
struct device *dev,
@@ -262,8 +262,6 @@ static inline u32 iwl_rx_packet_payload_len(const struct iwl_rx_packet *pkt)
* (i.e. mark it as non-idle).
* @CMD_WANT_ASYNC_CALLBACK: the op_mode's async callback function must be
* called after this command completes. Valid only with CMD_ASYNC.
* @CMD_TB_BITMAP_POS: Position of the first bit for the TB bitmap. We need to
* check that we leave enough room for the TBs bitmap which needs 20 bits.
*/
enum CMD_MODE {
CMD_ASYNC = BIT(0),
@@ -274,8 +272,6 @@ enum CMD_MODE {
CMD_MAKE_TRANS_IDLE = BIT(5),
CMD_WAKE_UP_TRANS = BIT(6),
CMD_WANT_ASYNC_CALLBACK = BIT(7),
CMD_TB_BITMAP_POS = 11,
};
#define DEF_CMD_PAYLOAD_SIZE 320
@@ -649,6 +645,8 @@ struct iwl_trans_ops {
void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id,
bool shared);
dma_addr_t (*get_txq_byte_table)(struct iwl_trans *trans, int txq_id);
int (*wait_tx_queue_empty)(struct iwl_trans *trans, u32 txq_bm);
void (*freeze_txq_timer)(struct iwl_trans *trans, unsigned long txqs,
bool freeze);
@@ -1073,6 +1071,15 @@ static inline void iwl_trans_txq_set_shared_mode(struct iwl_trans *trans,
trans->ops->txq_set_shared_mode(trans, queue, shared_mode);
}
static inline dma_addr_t iwl_trans_get_txq_byte_table(struct iwl_trans *trans,
int queue)
{
/* we should never be called if the trans doesn't support it */
BUG_ON(!trans->ops->get_txq_byte_table);
return trans->ops->get_txq_byte_table(trans, queue);
}
static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
int fifo, int sta_id, int tid,
int frame_limit, u16 ssn,
@@ -577,6 +577,85 @@ struct iwl_mvm_ba_notif {
u8 reserved1;
} __packed;
/**
* struct iwl_mvm_compressed_ba_tfd - progress of a TFD queue
* @q_num: TFD queue number
* @tfd_index: Index of first un-acked frame in the TFD queue
*/
struct iwl_mvm_compressed_ba_tfd {
u8 q_num;
u8 reserved;
__le16 tfd_index;
} __packed; /* COMPRESSED_BA_TFD_API_S_VER_1 */
/**
* struct iwl_mvm_compressed_ba_ratid - progress of a RA TID queue
* @q_num: RA TID queue number
* @tid: TID of the queue
* @ssn: BA window current SSN
*/
struct iwl_mvm_compressed_ba_ratid {
u8 q_num;
u8 tid;
__le16 ssn;
} __packed; /* COMPRESSED_BA_RATID_API_S_VER_1 */
/*
* enum iwl_mvm_ba_resp_flags - TX aggregation status
* @IWL_MVM_BA_RESP_TX_AGG: generated due to BA
* @IWL_MVM_BA_RESP_TX_BAR: generated due to BA after BAR
* @IWL_MVM_BA_RESP_TX_AGG_FAIL: aggregation didn't receive BA
* @IWL_MVM_BA_RESP_TX_UNDERRUN: aggregation got underrun
* @IWL_MVM_BA_RESP_TX_BT_KILL: aggregation got BT-kill
* @IWL_MVM_BA_RESP_TX_DSP_TIMEOUT: aggregation didn't finish within the
* expected time
*/
enum iwl_mvm_ba_resp_flags {
IWL_MVM_BA_RESP_TX_AGG,
IWL_MVM_BA_RESP_TX_BAR,
IWL_MVM_BA_RESP_TX_AGG_FAIL,
IWL_MVM_BA_RESP_TX_UNDERRUN,
IWL_MVM_BA_RESP_TX_BT_KILL,
IWL_MVM_BA_RESP_TX_DSP_TIMEOUT
};
/**
* struct iwl_mvm_compressed_ba_notif - notifies about reception of BA
* ( BA_NOTIF = 0xc5 )
* @flags: status flag, see the &iwl_mvm_ba_resp_flags
* @sta_id: Index of recipient (BA-sending) station in fw's station table
* @reduced_txp: power reduced according to TPC. This is the actual value and
* not a copy from the LQ command. Thus, if a rate other than the first
* was used for Tx-ing, FW sets this value to 0.
* @initial_rate: TLC rate info, initial rate index, TLC table color
* @retry_cnt: retry count
* @query_byte_cnt: SCD query byte count
* @query_frame_cnt: SCD query frame count
* @txed: number of frames sent in the aggregation (all-TIDs)
* @done: number of frames that were Acked by the BA (all-TIDs)
* @wireless_time: Wireless-media time
* @tx_rate: the rate the aggregation was sent at
* @tfd_cnt: number of TFD-Q elements
* @ra_tid_cnt: number of RATID-Q elements
*/
struct iwl_mvm_compressed_ba_notif {
__le32 flags;
u8 sta_id;
u8 reduced_txp;
u8 initial_rate;
u8 retry_cnt;
__le32 query_byte_cnt;
__le16 query_frame_cnt;
__le16 txed;
__le16 done;
__le32 wireless_time;
__le32 tx_rate;
__le16 tfd_cnt;
__le16 ra_tid_cnt;
struct iwl_mvm_compressed_ba_tfd tfd[1];
struct iwl_mvm_compressed_ba_ratid ra_tid[0];
} __packed; /* COMPRESSED_BA_RES_API_S_VER_4 */
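Because tfd[] is variable-length, the ra_tid[] array only begins at its declared offset when tfd_cnt == 1; a general-purpose accessor might look like this (hypothetical helper, not in the patch):

static inline struct iwl_mvm_compressed_ba_ratid *
iwl_mvm_ba_res_ra_tid(struct iwl_mvm_compressed_ba_notif *ba_res)
{
    /* the RA/TID elements follow tfd_cnt TFD-queue elements */
    return (void *)&ba_res->tfd[le16_to_cpu(ba_res->tfd_cnt)];
}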
/**
* struct iwl_mac_beacon_cmd_v6 - beacon template command
* @tx: the tx commands associated with the beacon frame
@@ -1977,7 +1977,8 @@ struct iwl_tdls_config_res {
struct iwl_tdls_config_sta_info_res sta_info[IWL_MVM_TDLS_STA_COUNT];
} __packed; /* TDLS_CONFIG_RSP_API_S_VER_1 */
#define TX_FIFO_MAX_NUM 8
#define TX_FIFO_MAX_NUM_9000 8
#define TX_FIFO_MAX_NUM 15
#define RX_FIFO_MAX_NUM 2
#define TX_FIFO_INTERNAL_MAX_NUM 6
@@ -2004,6 +2005,21 @@ struct iwl_tdls_config_res {
* NOTE: on firmware that doesn't have IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG
* set, the last 3 members don't exist.
*/
struct iwl_shared_mem_cfg_v1 {
__le32 shared_mem_addr;
__le32 shared_mem_size;
__le32 sample_buff_addr;
__le32 sample_buff_size;
__le32 txfifo_addr;
__le32 txfifo_size[TX_FIFO_MAX_NUM_9000];
__le32 rxfifo_size[RX_FIFO_MAX_NUM];
__le32 page_buff_addr;
__le32 page_buff_size;
__le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
struct iwl_shared_mem_cfg {
__le32 shared_mem_addr;
__le32 shared_mem_size;
@@ -2017,7 +2033,7 @@ struct iwl_shared_mem_cfg {
__le32 rxfifo_addr;
__le32 internal_txfifo_addr;
__le32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_2 */
} __packed; /* SHARED_MEM_ALLOC_API_S_VER_3 */
/**
* VHT MU-MIMO group configuration
@@ -440,14 +440,12 @@ static const struct iwl_prph_range iwl_prph_dump_addr_comm[] = {
{ .start = 0x00a04560, .end = 0x00a0457c },
{ .start = 0x00a04590, .end = 0x00a04598 },
{ .start = 0x00a045c0, .end = 0x00a045f4 },
{ .start = 0x00a44000, .end = 0x00a7bf80 },
};
static const struct iwl_prph_range iwl_prph_dump_addr_9000[] = {
{ .start = 0x00a05c00, .end = 0x00a05c18 },
{ .start = 0x00a05400, .end = 0x00a056e8 },
{ .start = 0x00a08000, .end = 0x00a098bc },
{ .start = 0x00adfc00, .end = 0x00adfd1c },
{ .start = 0x00a02400, .end = 0x00a02758 },
};
@@ -559,7 +557,7 @@ void iwl_mvm_fw_error_dump(struct iwl_mvm *mvm)
sizeof(struct iwl_fw_error_dump_fifo);
}
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++) {
for (i = 0; i < mem_cfg->num_txfifo_entries; i++) {
if (!mem_cfg->txfifo_size[i])
continue;
@@ -90,15 +90,6 @@ struct iwl_mvm_alive_data {
u32 scd_base_addr;
};
static inline const struct fw_img *
iwl_get_ucode_image(struct iwl_mvm *mvm, enum iwl_ucode_type ucode_type)
{
if (ucode_type >= IWL_UCODE_TYPE_MAX)
return NULL;
return &mvm->fw->img[ucode_type];
}
static int iwl_send_tx_ant_cfg(struct iwl_mvm *mvm, u8 valid_tx_ant)
{
struct iwl_tx_ant_cfg_cmd tx_ant_cmd = {
@@ -592,9 +583,9 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm,
iwl_fw_dbg_conf_usniffer(mvm->fw, FW_DBG_START_FROM_ALIVE) &&
!(fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_USNIFFER_UNIFIED)))
fw = iwl_get_ucode_image(mvm, IWL_UCODE_REGULAR_USNIFFER);
fw = iwl_get_ucode_image(mvm->fw, IWL_UCODE_REGULAR_USNIFFER);
else
fw = iwl_get_ucode_image(mvm, ucode_type);
fw = iwl_get_ucode_image(mvm->fw, ucode_type);
if (WARN_ON(!fw))
return -EINVAL;
mvm->cur_ucode = ucode_type;
@@ -838,59 +829,48 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
return ret;
}
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
static void iwl_mvm_parse_shared_mem_a000(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_host_cmd cmd = {
.flags = CMD_WANT_SKB,
.data = { NULL, },
.len = { 0, },
};
struct iwl_shared_mem_cfg *mem_cfg;
struct iwl_rx_packet *pkt;
u32 i;
struct iwl_shared_mem_cfg *mem_cfg = (void *)pkt->data;
int i;
lockdep_assert_held(&mvm->mutex);
mvm->shared_mem_cfg.num_txfifo_entries =
ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
else
cmd.id = SHARED_MEM_CFG;
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
return;
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.internal_txfifo_size);
i++)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
pkt = cmd.resp_pkt;
mem_cfg = (void *)pkt->data;
mvm->shared_mem_cfg.shared_mem_addr =
le32_to_cpu(mem_cfg->shared_mem_addr);
mvm->shared_mem_cfg.shared_mem_size =
le32_to_cpu(mem_cfg->shared_mem_size);
mvm->shared_mem_cfg.sample_buff_addr =
le32_to_cpu(mem_cfg->sample_buff_addr);
mvm->shared_mem_cfg.sample_buff_size =
le32_to_cpu(mem_cfg->sample_buff_size);
mvm->shared_mem_cfg.txfifo_addr = le32_to_cpu(mem_cfg->txfifo_addr);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size); i++)
static void iwl_mvm_parse_shared_mem(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
struct iwl_shared_mem_cfg_v1 *mem_cfg = (void *)pkt->data;
int i;
mvm->shared_mem_cfg.num_txfifo_entries =
ARRAY_SIZE(mvm->shared_mem_cfg.txfifo_size);
for (i = 0; i < ARRAY_SIZE(mem_cfg->txfifo_size); i++)
mvm->shared_mem_cfg.txfifo_size[i] =
le32_to_cpu(mem_cfg->txfifo_size[i]);
for (i = 0; i < ARRAY_SIZE(mvm->shared_mem_cfg.rxfifo_size); i++)
mvm->shared_mem_cfg.rxfifo_size[i] =
le32_to_cpu(mem_cfg->rxfifo_size[i]);
mvm->shared_mem_cfg.page_buff_addr =
le32_to_cpu(mem_cfg->page_buff_addr);
mvm->shared_mem_cfg.page_buff_size =
le32_to_cpu(mem_cfg->page_buff_size);
/* new API has more data */
/* new API has more data, from rxfifo_addr field and on */
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) {
mvm->shared_mem_cfg.rxfifo_addr =
le32_to_cpu(mem_cfg->rxfifo_addr);
mvm->shared_mem_cfg.internal_txfifo_addr =
le32_to_cpu(mem_cfg->internal_txfifo_addr);
BUILD_BUG_ON(sizeof(mvm->shared_mem_cfg.internal_txfifo_size) !=
sizeof(mem_cfg->internal_txfifo_size));
@@ -900,6 +880,33 @@ static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
mvm->shared_mem_cfg.internal_txfifo_size[i] =
le32_to_cpu(mem_cfg->internal_txfifo_size[i]);
}
}
static void iwl_mvm_get_shared_mem_conf(struct iwl_mvm *mvm)
{
struct iwl_host_cmd cmd = {
.flags = CMD_WANT_SKB,
.data = { NULL, },
.len = { 0, },
};
struct iwl_rx_packet *pkt;
lockdep_assert_held(&mvm->mutex);
if (fw_has_capa(&mvm->fw->ucode_capa,
IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG))
cmd.id = iwl_cmd_id(SHARED_MEM_CFG_CMD, SYSTEM_GROUP, 0);
else
cmd.id = SHARED_MEM_CFG;
if (WARN_ON(iwl_mvm_send_cmd(mvm, &cmd)))
return;
pkt = cmd.resp_pkt;
if (iwl_mvm_has_new_tx_api(mvm))
iwl_mvm_parse_shared_mem_a000(mvm, pkt);
else
iwl_mvm_parse_shared_mem(mvm, pkt);
IWL_DEBUG_INFO(mvm, "SHARED MEM CFG: got memory offsets/sizes\n");
@@ -604,16 +604,9 @@ enum iwl_mvm_tdls_cs_state {
};
struct iwl_mvm_shared_mem_cfg {
u32 shared_mem_addr;
u32 shared_mem_size;
u32 sample_buff_addr;
u32 sample_buff_size;
u32 txfifo_addr;
int num_txfifo_entries;
u32 txfifo_size[TX_FIFO_MAX_NUM];
u32 rxfifo_size[RX_FIFO_MAX_NUM];
u32 page_buff_addr;
u32 page_buff_size;
u32 rxfifo_addr;
u32 internal_txfifo_addr;
u32 internal_txfifo_size[TX_FIFO_INTERNAL_MAX_NUM];
};
@@ -1672,7 +1672,7 @@ static void iwl_mvm_rx_mq_rss(struct iwl_op_mode *op_mode,
else if (unlikely(pkt->hdr.cmd == RX_QUEUES_NOTIFICATION &&
pkt->hdr.group_id == DATA_PATH_GROUP))
iwl_mvm_rx_queue_notif(mvm, rxb, queue);
else
else if (likely(pkt->hdr.cmd == REPLY_RX_MPDU_CMD))
iwl_mvm_rx_mpdu_mq(mvm, napi, rxb, queue);
}
@@ -452,10 +452,10 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
u16 sn = 0, index = 0;
bool expired = false;
spin_lock_bh(&buf->lock);
spin_lock(&buf->lock);
if (!buf->num_stored || buf->removed) {
spin_unlock_bh(&buf->lock);
spin_unlock(&buf->lock);
return;
}
@@ -492,7 +492,7 @@ void iwl_mvm_reorder_timer_expired(unsigned long data)
buf->reorder_time[index] +
1 + RX_REORDER_BUF_TIMEOUT_MQ);
}
spin_unlock_bh(&buf->lock);
spin_unlock(&buf->lock);
}
static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
@@ -503,7 +503,7 @@ static void iwl_mvm_del_ba(struct iwl_mvm *mvm, int queue,
struct iwl_mvm_reorder_buffer *reorder_buf;
u8 baid = data->baid;
if (WARN_ON_ONCE(baid >= IWL_RX_REORDER_DATA_INVALID_BAID))
if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
return;
rcu_read_lock();
@@ -588,9 +588,7 @@ int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid,
ret);
/* Make sure the SCD wrptr is correctly set before reconfiguring */
iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac],
cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF,
ssn, wdg_timeout);
iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);
/* Update the TID "owner" of the queue */
spin_lock_bh(&mvm->queue_info_lock);
@@ -1498,9 +1496,31 @@ int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
/* If DQA is supported - the queues can be disabled now */
if (iwl_mvm_is_dqa_supported(mvm))
if (iwl_mvm_is_dqa_supported(mvm)) {
u8 reserved_txq = mvm_sta->reserved_queue;
enum iwl_mvm_queue_status *status;
iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta);
/*
* If no traffic has gone through the reserved TXQ - it
* is still marked as IWL_MVM_QUEUE_RESERVED, and
* should be manually marked as free again
*/
spin_lock_bh(&mvm->queue_info_lock);
status = &mvm->queue_info[reserved_txq].status;
if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
(*status != IWL_MVM_QUEUE_FREE),
"sta_id %d reserved txq %d status %d",
mvm_sta->sta_id, reserved_txq, *status)) {
spin_unlock_bh(&mvm->queue_info_lock);
return -EINVAL;
}
*status = IWL_MVM_QUEUE_FREE;
spin_unlock_bh(&mvm->queue_info_lock);
}
if (vif->type == NL80211_IFTYPE_STATION &&
mvmvif->ap_sta_id == mvm_sta->sta_id) {
/* if associated - we can't remove the AP STA now */
@@ -2030,11 +2050,9 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
baid_data->baid = baid;
baid_data->timeout = timeout;
baid_data->last_rx = jiffies;
init_timer(&baid_data->session_timer);
baid_data->session_timer.function =
iwl_mvm_rx_agg_session_expired;
baid_data->session_timer.data =
(unsigned long)&mvm->baid_map[baid];
setup_timer(&baid_data->session_timer,
iwl_mvm_rx_agg_session_expired,
(unsigned long)&mvm->baid_map[baid]);
baid_data->mvm = mvm;
baid_data->tid = tid;
baid_data->sta_id = mvm_sta->sta_id;
@@ -920,9 +920,13 @@ static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
tid = IWL_MAX_TID_COUNT;
}
if (iwl_mvm_is_dqa_supported(mvm))
if (iwl_mvm_is_dqa_supported(mvm)) {
txq_id = mvmsta->tid_data[tid].txq_id;
if (ieee80211_is_mgmt(fc))
tx_cmd->tid_tspec = IWL_TID_NON_QOS;
}
/* Copy MAC header from skb into command buffer */
memcpy(tx_cmd->hdr, hdr, hdrlen);
@@ -1100,9 +1104,13 @@ static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
IWL_DEBUG_TX_QUEUES(mvm,
"Can continue DELBA flow ssn = next_recl = %d\n",
tid_data->next_reclaimed);
if (!iwl_mvm_is_dqa_supported(mvm)) {
u8 mac80211_ac = tid_to_mac80211_ac[tid];
iwl_mvm_disable_txq(mvm, tid_data->txq_id,
vif->hw_queue[tid_to_mac80211_ac[tid]], tid,
vif->hw_queue[mac80211_ac], tid,
CMD_ASYNC);
}
tid_data->state = IWL_AGG_OFF;
ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
break;
@@ -1580,41 +1588,16 @@ void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
}
static void iwl_mvm_tx_info_from_ba_notif(struct ieee80211_tx_info *info,
struct iwl_mvm_ba_notif *ba_notif,
struct iwl_mvm_tid_data *tid_data)
{
info->flags |= IEEE80211_TX_STAT_AMPDU;
info->status.ampdu_ack_len = ba_notif->txed_2_done;
info->status.ampdu_len = ba_notif->txed;
iwl_mvm_hwrate_to_tx_status(tid_data->rate_n_flags,
info);
/* TODO: not accounted if the whole A-MPDU failed */
info->status.tx_time = tid_data->tx_time;
info->status.status_driver_data[0] =
(void *)(uintptr_t)ba_notif->reduced_txp;
info->status.status_driver_data[1] =
(void *)(uintptr_t)tid_data->rate_n_flags;
}
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
int txq, int index,
struct ieee80211_tx_info *ba_info, u32 rate)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
struct iwl_mvm_ba_notif *ba_notif = (void *)pkt->data;
struct sk_buff_head reclaimed_skbs;
struct iwl_mvm_tid_data *tid_data;
struct ieee80211_sta *sta;
struct iwl_mvm_sta *mvmsta;
struct sk_buff *skb;
int sta_id, tid, freed;
/* "flow" corresponds to Tx queue */
u16 scd_flow = le16_to_cpu(ba_notif->scd_flow);
/* "ssn" is start of block-ack Tx window, corresponds to index
* (in Tx queue's circular buffer) of first TFD/frame in window */
u16 ba_resp_scd_ssn = le16_to_cpu(ba_notif->scd_ssn);
sta_id = ba_notif->sta_id;
tid = ba_notif->tid;
int freed;
if (WARN_ONCE(sta_id >= IWL_MVM_STATION_COUNT ||
tid >= IWL_MAX_TID_COUNT,
@@ -1634,10 +1617,10 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
mvmsta = iwl_mvm_sta_from_mac80211(sta);
tid_data = &mvmsta->tid_data[tid];
if (tid_data->txq_id != scd_flow) {
if (tid_data->txq_id != txq) {
IWL_ERR(mvm,
"invalid BA notification: Q %d, tid %d, flow %d\n",
tid_data->txq_id, tid, scd_flow);
"invalid BA notification: Q %d, tid %d\n",
tid_data->txq_id, tid);
rcu_read_unlock();
return;
}
@@ -1651,27 +1634,14 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* block-ack window (we assume that they've been successfully
* transmitted ... if not, it's too late anyway).
*/
iwl_trans_reclaim(mvm->trans, scd_flow, ba_resp_scd_ssn,
&reclaimed_skbs);
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
(u8 *)&ba_notif->sta_addr_lo32,
ba_notif->sta_id);
IWL_DEBUG_TX_REPLY(mvm,
"TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
(unsigned long long)le64_to_cpu(ba_notif->bitmap),
scd_flow, ba_resp_scd_ssn, ba_notif->txed,
ba_notif->txed_2_done);
iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs);
IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
ba_notif->reduced_txp);
tid_data->next_reclaimed = ba_resp_scd_ssn;
tid_data->next_reclaimed = index;
iwl_mvm_check_ratid_empty(mvm, sta, tid);
freed = 0;
ba_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
skb_queue_walk(&reclaimed_skbs, skb) {
struct ieee80211_hdr *hdr = (void *)skb->data;
@@ -1693,8 +1663,12 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
/* this is the first skb we deliver in this batch */
/* put the rate scaling data there */
if (freed == 1)
iwl_mvm_tx_info_from_ba_notif(info, ba_notif, tid_data);
if (freed == 1) {
info->flags |= IEEE80211_TX_STAT_AMPDU;
memcpy(&info->status, &ba_info->status,
sizeof(ba_info->status));
iwl_mvm_hwrate_to_tx_status(rate, info);
}
}
spin_unlock_bh(&mvmsta->lock);
@@ -1704,7 +1678,6 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
* Still it's important to update RS about sent vs. acked.
*/
if (skb_queue_empty(&reclaimed_skbs)) {
struct ieee80211_tx_info ba_info = {};
struct ieee80211_chanctx_conf *chanctx_conf = NULL;
if (mvmsta->vif)
@@ -1714,11 +1687,11 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
if (WARN_ON_ONCE(!chanctx_conf))
goto out;
ba_info.band = chanctx_conf->def.chan->band;
iwl_mvm_tx_info_from_ba_notif(&ba_info, ba_notif, tid_data);
ba_info->band = chanctx_conf->def.chan->band;
iwl_mvm_hwrate_to_tx_status(rate, ba_info);
IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
iwl_mvm_rs_tx_status(mvm, sta, tid, &ba_info, false);
iwl_mvm_rs_tx_status(mvm, sta, tid, ba_info, false);
}
out:
@@ -1730,6 +1703,92 @@ void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
}
}
void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
int sta_id, tid, txq, index;
struct ieee80211_tx_info ba_info = {};
struct iwl_mvm_ba_notif *ba_notif;
struct iwl_mvm_tid_data *tid_data;
struct iwl_mvm_sta *mvmsta;
if (iwl_mvm_has_new_tx_api(mvm)) {
struct iwl_mvm_compressed_ba_notif *ba_res =
(void *)pkt->data;
sta_id = ba_res->sta_id;
ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
ba_info.status.tx_time =
(u16)le32_to_cpu(ba_res->wireless_time);
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_res->reduced_txp;
/*
* TODO:
* When supporting multi TID aggregations - we need to move
* next_reclaimed to be per TXQ and not per TID or handle it
* in a different way.
* This will go together with SN and AddBA offload and cannot
* be handled properly for now.
*/
WARN_ON(le16_to_cpu(ba_res->tfd_cnt) != 1);
iwl_mvm_tx_reclaim(mvm, sta_id, ba_res->ra_tid[0].tid,
(int)ba_res->tfd[0].q_num,
le16_to_cpu(ba_res->tfd[0].tfd_index),
&ba_info, le32_to_cpu(ba_res->tx_rate));
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
sta_id, le32_to_cpu(ba_res->flags),
le16_to_cpu(ba_res->txed),
le16_to_cpu(ba_res->done));
return;
}
ba_notif = (void *)pkt->data;
sta_id = ba_notif->sta_id;
tid = ba_notif->tid;
/* "flow" corresponds to Tx queue */
txq = le16_to_cpu(ba_notif->scd_flow);
/* "ssn" is start of block-ack Tx window, corresponds to index
* (in Tx queue's circular buffer) of first TFD/frame in window */
index = le16_to_cpu(ba_notif->scd_ssn);
rcu_read_lock();
mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
if (WARN_ON_ONCE(!mvmsta)) {
rcu_read_unlock();
return;
}
tid_data = &mvmsta->tid_data[tid];
ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
ba_info.status.ampdu_len = ba_notif->txed;
ba_info.status.tx_time = tid_data->tx_time;
ba_info.status.status_driver_data[0] =
(void *)(uintptr_t)ba_notif->reduced_txp;
rcu_read_unlock();
iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
tid_data->rate_n_flags);
IWL_DEBUG_TX_REPLY(mvm,
"BA_NOTIFICATION Received from %pM, sta_id = %d\n",
(u8 *)&ba_notif->sta_addr_lo32, ba_notif->sta_id);
IWL_DEBUG_TX_REPLY(mvm,
"TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
le64_to_cpu(ba_notif->bitmap), txq, index,
ba_notif->txed, ba_notif->txed_2_done);
IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
ba_notif->reduced_txp);
}
/*
* Note that there are transports that buffer frames before they reach
* the firmware. This means that after flush_tx_path is called, the
@@ -500,6 +500,7 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x24FD, 0x0930, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0950, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0850, iwl8265_2ac_cfg)},
{IWL_PCI_DEVICE(0x24FD, 0x0012, iwl8275_2ac_cfg)},
/* 9000 Series */
{IWL_PCI_DEVICE(0x271B, 0x0010, iwl9160_2ac_cfg)},
@@ -523,6 +524,10 @@ static const struct pci_device_id iwl_hw_card_ids[] = {
{IWL_PCI_DEVICE(0x9DF0, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0060, iwl9460_2ac_cfg)},
{IWL_PCI_DEVICE(0x2526, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x9DF0, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0xA370, 0x0030, iwl9560_2ac_cfg)},
{IWL_PCI_DEVICE(0x31DC, 0x0030, iwl9560_2ac_cfg)},
/* a000 Series */
{IWL_PCI_DEVICE(0x2720, 0x0A10, iwla000_2ac_cfg)},
@@ -37,6 +37,7 @@
#include <linux/wait.h>
#include <linux/pci.h>
#include <linux/timer.h>
#include <linux/cpu.h>
#include "iwl-fh.h"
#include "iwl-csr.h"
@@ -49,7 +50,7 @@
* be needed for potential data in the SKB's head. The remaining ones can
* be used for frags.
*/
#define IWL_PCIE_MAX_FRAGS (IWL_NUM_OF_TBS - 3)
#define IWL_PCIE_MAX_FRAGS(x) (x->max_tbs - 3)
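To make the macro's effect concrete, a worked instance (the arithmetic is added here, not taken from the patch):

    /* pre-a000: max_tbs = IWL_NUM_OF_TBS (20)  -> IWL_PCIE_MAX_FRAGS = 17 */
    /* a000:     max_tbs = IWL_TFH_NUM_TBS (25) -> IWL_PCIE_MAX_FRAGS = 22 */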
/*
* RX related structures and functions
@@ -192,41 +193,9 @@ struct iwl_cmd_meta {
/* only for SYNC commands, iff the reply skb is wanted */
struct iwl_host_cmd *source;
u32 flags;
u32 tbs;
};
/*
* Generic queue structure
*
* Contains common data for Rx and Tx queues.
*
* Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
* always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256 (unless
* there might be HW changes in the future). For the normal TX
* queues, n_window, which is the size of the software queue data
* is also 256; however, for the command queue, n_window is only
* 32 since we don't need so many commands pending. Since the HW
* still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256. As a result,
* the software buffers (in the variables @meta, @txb in struct
* iwl_txq) only have 32 entries, while the HW buffers (@tfds in
* the same struct) have 256.
* This means that we end up with the following:
* HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
* SW entries: | 0 | ... | 31 |
* where N is a number between 0 and 7. This means that the SW
* data is a window overlayed over the HW queue.
*/
struct iwl_queue {
int write_ptr; /* 1-st empty entry (index) host_w*/
int read_ptr; /* last used entry (index) host_r*/
/* use for monitoring and recovering the stuck queue */
dma_addr_t dma_addr; /* physical addr for BD's */
int n_window; /* safe queue window */
u32 id;
int low_mark; /* low watermark, resume queue if free
* space more than this */
int high_mark; /* high watermark, stop queue if free
* space less than this */
};
#define TFD_TX_CMD_SLOTS 256
#define TFD_CMD_SLOTS 32
@@ -273,13 +242,32 @@ struct iwl_pcie_first_tb_buf {
* @wd_timeout: queue watchdog timeout (jiffies) - per queue
* @frozen: tx stuck queue timer is frozen
* @frozen_expiry_remainder: remember how long until the timer fires
* @write_ptr: 1-st empty entry (index) host_w
* @read_ptr: last used entry (index) host_r
* @dma_addr: physical addr for BD's
* @n_window: safe queue window
* @id: queue id
* @low_mark: low watermark, resume queue if free space more than this
* @high_mark: high watermark, stop queue if free space less than this
*
* A Tx queue consists of circular buffer of BDs (a.k.a. TFDs, transmit frame
* descriptors) and required locking structures.
*
* Note the difference between TFD_QUEUE_SIZE_MAX and n_window: the hardware
* always assumes 256 descriptors, so TFD_QUEUE_SIZE_MAX is always 256
* (unless HW changes in the future). For the normal TX
* queues, n_window, which is the size of the software queue data
* is also 256; however, for the command queue, n_window is only
* 32 since we don't need so many commands pending. Since the HW
* still uses 256 BDs for DMA though, TFD_QUEUE_SIZE_MAX stays 256.
* This means that we end up with the following:
* HW entries: | 0 | ... | N * 32 | ... | N * 32 + 31 | ... | 255 |
* SW entries: | 0 | ... | 31 |
* where N is a number between 0 and 7. This means that the SW
* data is a window overlayed over the HW queue.
*/
struct iwl_txq {
struct iwl_queue q;
struct iwl_tfd *tfds;
void *tfds;
struct iwl_pcie_first_tb_buf *first_tb_bufs;
dma_addr_t first_tb_dma;
struct iwl_pcie_txq_entry *entries;
@@ -294,6 +282,14 @@ struct iwl_txq {
bool block;
unsigned long wd_timeout;
struct sk_buff_head overflow_q;
int write_ptr;
int read_ptr;
dma_addr_t dma_addr;
int n_window;
u32 id;
int low_mark;
int high_mark;
};
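A worked instance of the window overlay described in the comment above (a sketch assuming n_window is a power of two, as it is for the 32-entry command queue; this mirrors get_cmd_index() shown later in this diff):

static inline int iwl_txq_sw_index_sketch(int hw_index, int n_window)
{
    /* e.g. HW index 100 with n_window 32 maps to SW entry 100 & 31 = 4 */
    return hw_index & (n_window - 1);
}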
static inline dma_addr_t
@@ -308,6 +304,16 @@ struct iwl_tso_hdr_page {
u8 *pos;
};
/**
* enum iwl_shared_irq_flags - level of sharing for irq
* @IWL_SHARED_IRQ_NON_RX: interrupt vector serves non-RX causes.
* @IWL_SHARED_IRQ_FIRST_RSS: interrupt vector serves first RSS queue.
*/
enum iwl_shared_irq_flags {
IWL_SHARED_IRQ_NON_RX = BIT(0),
IWL_SHARED_IRQ_FIRST_RSS = BIT(1),
};
/**
* struct iwl_trans_pcie - PCIe transport specific data
* @rxq: all the RX queue data
@@ -338,8 +344,10 @@ struct iwl_tso_hdr_page {
* @fw_mon_size: size of the buffer for the firmware monitor
* @msix_entries: array of MSI-X entries
* @msix_enabled: true if managed to enable MSI-X
* @allocated_vector: the number of interrupt vector allocated by the OS
* @default_irq_num: default irq for non rx interrupt
* @shared_vec_mask: the type of causes the shared vector handles
* (see iwl_shared_irq_flags).
* @alloc_vecs: the number of interrupt vectors allocated by the OS
* @def_irq: default irq for non-RX causes
* @fh_init_mask: initial unmasked fh causes
* @hw_init_mask: initial unmasked hw causes
* @fh_mask: current unmasked fh causes
@@ -391,6 +399,8 @@ struct iwl_trans_pcie {
unsigned int cmd_q_wdg_timeout;
u8 n_no_reclaim_cmds;
u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
u8 max_tbs;
u16 tfd_size;
enum iwl_amsdu_size rx_buf_size;
bool bc_table_dword;
@@ -410,12 +420,14 @@ struct iwl_trans_pcie {
struct msix_entry msix_entries[IWL_MAX_RX_HW_QUEUES];
bool msix_enabled;
u32 allocated_vector;
u32 default_irq_num;
u8 shared_vec_mask;
u32 alloc_vecs;
u32 def_irq;
u32 fh_init_mask;
u32 hw_init_mask;
u32 fh_mask;
u32 hw_mask;
cpumask_t affinity_mask[IWL_MAX_RX_HW_QUEUES];
};
static inline struct iwl_trans_pcie *
@@ -474,6 +486,7 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int queue,
bool configure_scd);
void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
bool shared_mode);
dma_addr_t iwl_trans_pcie_get_txq_byte_table(struct iwl_trans *trans, int txq);
void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans,
struct iwl_txq *txq);
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
@@ -486,9 +499,21 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
struct sk_buff_head *skbs);
void iwl_trans_pcie_tx_reset(struct iwl_trans *trans);
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_tfd *tfd, u8 idx)
static inline u16 iwl_pcie_tfd_tb_get_len(struct iwl_trans *trans, void *tfd,
u8 idx)
{
struct iwl_tfd_tb *tb = &tfd->tbs[idx];
struct iwl_tfd *tfd_fh;
struct iwl_tfd_tb *tb;
if (trans->cfg->use_tfh) {
struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
struct iwl_tfh_tb *tb = &tfd_fh->tbs[idx];
return le16_to_cpu(tb->tb_len);
}
tfd_fh = (void *)tfd;
tb = &tfd_fh->tbs[idx];
return le16_to_cpu(tb->hi_n_len) >> 4;
}
@@ -617,9 +642,9 @@ static inline void iwl_wake_queue(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (test_and_clear_bit(txq->q.id, trans_pcie->queue_stopped)) {
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->q.id);
iwl_op_mode_queue_not_full(trans->op_mode, txq->q.id);
if (test_and_clear_bit(txq->id, trans_pcie->queue_stopped)) {
IWL_DEBUG_TX_QUEUES(trans, "Wake hwq %d\n", txq->id);
iwl_op_mode_queue_not_full(trans->op_mode, txq->id);
}
}
@@ -628,22 +653,22 @@ static inline void iwl_stop_queue(struct iwl_trans *trans,
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
if (!test_and_set_bit(txq->q.id, trans_pcie->queue_stopped)) {
iwl_op_mode_queue_full(trans->op_mode, txq->q.id);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->q.id);
if (!test_and_set_bit(txq->id, trans_pcie->queue_stopped)) {
iwl_op_mode_queue_full(trans->op_mode, txq->id);
IWL_DEBUG_TX_QUEUES(trans, "Stop hwq %d\n", txq->id);
} else
IWL_DEBUG_TX_QUEUES(trans, "hwq %d already stopped\n",
txq->q.id);
txq->id);
}
static inline bool iwl_queue_used(const struct iwl_queue *q, int i)
static inline bool iwl_queue_used(const struct iwl_txq *q, int i)
{
return q->write_ptr >= q->read_ptr ?
(i >= q->read_ptr && i < q->write_ptr) :
!(i < q->read_ptr && i >= q->write_ptr);
}
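A worked example of the wrap-around case handled above (illustrative numbers, not from the patch):

    /*
     * With read_ptr = 250 and write_ptr = 5 the queue has wrapped:
     * entries 250..255 and 0..4 are in use, so iwl_queue_used(q, 2)
     * is true while iwl_queue_used(q, 100) is false.
     */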
static inline u8 get_cmd_index(struct iwl_queue *q, u32 index)
static inline u8 get_cmd_index(struct iwl_txq *q, u32 index)
{
return index & (q->n_window - 1);
}
@@ -1142,7 +1142,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
sequence = le16_to_cpu(pkt->hdr.sequence);
index = SEQ_TO_INDEX(sequence);
cmd_index = get_cmd_index(&txq->q, index);
cmd_index = get_cmd_index(txq, index);
if (rxq->id == 0)
iwl_op_mode_rx(trans->op_mode, &rxq->napi,
@@ -1885,6 +1885,20 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
inta_fh,
iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
inta_fh & MSIX_FH_INT_CAUSES_Q0) {
local_bh_disable();
iwl_pcie_rx_handle(trans, 0);
local_bh_enable();
}
if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
inta_fh & MSIX_FH_INT_CAUSES_Q1) {
local_bh_disable();
iwl_pcie_rx_handle(trans, 1);
local_bh_enable();
}
/* This "Tx" DMA channel is used only for loading uCode */
if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
IWL_DEBUG_ISR(trans, "uCode load interrupt\n");