Commit 4beaf6c2 authored by Emmanuel Grumbach, committed by Johannes Berg

iwlwifi: s/txq_setup/txq_enable

We need to be able to enable / disable Tx queues in HW
dynamically, so this function is no longer related to AGG
queues only: it can do the job for any queue, even the AC
ones. Rename it to better reflect its role.

Also use the new function to configure the AC / CMD queues
in tx_start.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent d0624be6
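
For orientation before the diff: after this rename, the same trans op serves
both the aggregation path and the plain AC / CMD path. A minimal sketch of the
two call patterns (both taken verbatim from the hunks below; the surrounding
function context is assumed, not shown):

	/* Sketch only -- both calls appear in the diff below.
	 *
	 * AMPDU path (iwlagn_tx_agg_oper): a station/TID pair is attached,
	 * so the scheduler treats the queue as an AGG queue: */
	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
			     buf_size, ssn);

	/* Non-AGG path (iwl_tx_start): no station is attached, so the same
	 * helper now also brings up the AC / CMD queues: */
	__iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
				    IWL_TID_NON_QOS, SCD_FRAME_LIMIT, 0);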
@@ -665,7 +665,7 @@ static int iwlagn_mac_ampdu_action(struct ieee80211_hw *hw,
 		ret = iwl_sta_rx_agg_stop(priv, sta, tid);
 		break;
 	case IEEE80211_AMPDU_TX_START:
-		if (!priv->trans->ops->tx_agg_setup)
+		if (!priv->trans->ops->txq_enable)
			break;
 		if (iwlwifi_mod_params.disable_11n & IWL_DISABLE_HT_TXAGG)
 			break;

@@ -688,8 +688,7 @@ int iwlagn_tx_agg_oper(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	fifo = ctx->ac_to_fifo[tid_to_ac[tid]];

-	iwl_trans_tx_agg_setup(priv->trans, q, fifo,
-			       sta_priv->sta_id, tid,
-			       buf_size, ssn);
+	iwl_trans_txq_enable(priv->trans, q, fifo, sta_priv->sta_id, tid,
+			     buf_size, ssn);

 	/*

@@ -355,7 +355,7 @@ struct iwl_trans;
  *	Must be atomic
  * @reclaim: free packet until ssn. Returns a list of freed packets.
  *	Must be atomic
- * @tx_agg_setup: setup a tx queue for AMPDU - will be called once the HW is
+ * @txq_enable: setup a tx queue for AMPDU - will be called once the HW is
  *	ready and a successful ADDBA response has been received.
  *	May sleep
  * @txq_disable: de-configure a Tx queue to send AMPDUs

@@ -391,7 +391,7 @@ struct iwl_trans_ops {
 	void (*reclaim)(struct iwl_trans *trans, int queue, int ssn,
 			struct sk_buff_head *skbs);

-	void (*tx_agg_setup)(struct iwl_trans *trans, int queue, int fifo,
-			     int sta_id, int tid, int frame_limit, u16 ssn);
+	void (*txq_enable)(struct iwl_trans *trans, int queue, int fifo,
+			   int sta_id, int tid, int frame_limit, u16 ssn);
 	void (*txq_disable)(struct iwl_trans *trans, int queue);

@@ -551,7 +551,7 @@ static inline void iwl_trans_txq_disable(struct iwl_trans *trans, int queue)
 	trans->ops->txq_disable(trans, queue);
 }

-static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
-					  int fifo, int sta_id, int tid,
-					  int frame_limit, u16 ssn)
+static inline void iwl_trans_txq_enable(struct iwl_trans *trans, int queue,
+					int fifo, int sta_id, int tid,
+					int frame_limit, u16 ssn)
 {

@@ -560,7 +560,7 @@ static inline void iwl_trans_tx_agg_setup(struct iwl_trans *trans, int queue,
 	WARN_ONCE(trans->state != IWL_TRANS_FW_ALIVE,
 		  "%s bad state = %d", __func__, trans->state);

-	trans->ops->tx_agg_setup(trans, queue, fifo, sta_id, tid,
-				 frame_limit, ssn);
+	trans->ops->txq_enable(trans, queue, fifo, sta_id, tid,
+			       frame_limit, ssn);
 }

@@ -344,7 +344,10 @@ void iwl_trans_set_wr_ptrs(struct iwl_trans *trans, int txq_id, u32 index);
 void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 				   struct iwl_tx_queue *txq,
 				   int tx_fifo_id, bool active);
-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int queue, int fifo,
-				 int sta_id, int tid, int frame_limit, u16 ssn);
+void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
+				 int fifo, int sta_id, int tid,
+				 int frame_limit, u16 ssn);
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+			       int sta_id, int tid, int frame_limit, u16 ssn);
 void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_tx_queue *txq,
 		      enum dma_data_direction dma_dir);

@@ -1054,33 +1054,12 @@ static void iwl_tx_start(struct iwl_trans *trans)
 	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
 		       trans_pcie->scd_bc_tbls.dma >> 10);

-	iwl_write_prph(trans, SCD_QUEUECHAIN_SEL,
-		       SCD_QUEUECHAIN_SEL_ALL(trans, trans_pcie));
-	iwl_write_prph(trans, SCD_AGGR_SEL, 0);
-
-	/* initiate the queues */
-	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
-		iwl_trans_set_wr_ptrs(trans, i, 0);
-		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-				SCD_CONTEXT_QUEUE_OFFSET(i), 0);
-		iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-				SCD_CONTEXT_QUEUE_OFFSET(i) +
-				sizeof(u32),
-				((SCD_WIN_SIZE <<
-				SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
-				SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
-				((SCD_FRAME_LIMIT <<
-				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_POS) &
-				SCD_QUEUE_CTX_REG2_FRAME_LIMIT_MSK));
-	}
-
 	for (i = 0; i < trans_pcie->n_q_to_fifo; i++) {
 		int fifo = trans_pcie->setup_q_to_fifo[i];

-		set_bit(i, trans_pcie->queue_used);
-
-		iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[i],
-					      fifo, true);
+		__iwl_trans_pcie_txq_enable(trans, i, fifo, IWL_INVALID_STATION,
+					    IWL_TID_NON_QOS,
+					    SCD_FRAME_LIMIT, 0);
 	}

 	/* Activate all Tx DMA/FIFO channels */

@@ -2040,7 +2019,7 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 	.reclaim = iwl_trans_pcie_reclaim,

 	.txq_disable = iwl_trans_pcie_txq_disable,
-	.tx_agg_setup = iwl_trans_pcie_tx_agg_setup,
+	.txq_enable = iwl_trans_pcie_txq_enable,

 	.dbgfs_register = iwl_trans_pcie_dbgfs_register,

@@ -442,29 +442,34 @@ void iwl_trans_tx_queue_set_status(struct iwl_trans *trans,
 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
 }

-void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
-				 int sta_id, int tid, int frame_limit, u16 ssn)
+void __iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id,
+				 int fifo, int sta_id, int tid,
+				 int frame_limit, u16 ssn)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	unsigned long flags;
-	u16 ra_tid = BUILD_RAxTID(sta_id, tid);
+
+	lockdep_assert_held(&trans_pcie->irq_lock);

 	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

-	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
 	/* Stop this Tx queue before configuring it */
 	iwlagn_tx_queue_stop_scheduler(trans, txq_id);

-	/* Map receiver-address / traffic-ID to this queue */
-	iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
+	/* Set this queue as a chain-building queue unless it is CMD queue */
+	if (txq_id != trans_pcie->cmd_queue)
+		iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));

-	/* Set this queue as a chain-building queue */
-	iwl_set_bits_prph(trans, SCD_QUEUECHAIN_SEL, BIT(txq_id));
+	/* If this queue is mapped to a certain station: it is an AGG queue */
+	if (sta_id != IWL_INVALID_STATION) {
+		u16 ra_tid = BUILD_RAxTID(sta_id, tid);

-	/* enable aggregations for the queue */
-	iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+		/* Map receiver-address / traffic-ID to this queue */
+		iwlagn_tx_queue_set_q2ratid(trans, ra_tid, txq_id);
+
+		/* enable aggregations for the queue */
+		iwl_set_bits_prph(trans, SCD_AGGR_SEL, BIT(txq_id));
+	}

 	/* Place first TFD at index corresponding to start sequence number.
 	 * Assumes that ssn_idx is valid (!= 0xFFF) */

@@ -473,6 +478,8 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
 	iwl_trans_set_wr_ptrs(trans, txq_id, ssn);

 	/* Set up Tx window size and frame limit for this queue */
+	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
+			   SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
 	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
 			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
 			((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &

@@ -483,6 +490,18 @@ void iwl_trans_pcie_tx_agg_setup(struct iwl_trans *trans, int txq_id, int fifo,
 	/* Set up Status area in SRAM, map to Tx DMA/FIFO, activate the queue */
 	iwl_trans_tx_queue_set_status(trans, &trans_pcie->txq[txq_id],
 				      fifo, true);
+}
+
+void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
+			       int sta_id, int tid, int frame_limit, u16 ssn)
+{
+	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+	unsigned long flags;
+
+	spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+
+	__iwl_trans_pcie_txq_enable(trans, txq_id, fifo, sta_id,
+				    tid, frame_limit, ssn);

 	spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
 }
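
A note on the structure of the PCIe change: the configuration body moves into a
lockless __iwl_trans_pcie_txq_enable(), which checks irq_lock via
lockdep_assert_held(), while iwl_trans_pcie_txq_enable() is a thin wrapper that
takes the lock. This lets iwl_tx_start(), which presumably already runs with
irq_lock held, call the double-underscore variant directly. A self-contained
sketch of this locked/unlocked helper idiom (struct foo and its fields are
hypothetical stand-ins, not part of the commit):

	#include <linux/spinlock.h>
	#include <linux/lockdep.h>

	struct foo {
		spinlock_t lock;
		/* ... queue state ... */
	};

	/* Lockless body: caller must hold f->lock (checked by lockdep). */
	static void __foo_enable(struct foo *f, int id)
	{
		lockdep_assert_held(&f->lock);
		/* ... program queue 'id' here ... */
	}

	/* Locking wrapper for callers that do not hold f->lock yet. */
	static void foo_enable(struct foo *f, int id)
	{
		unsigned long flags;

		spin_lock_irqsave(&f->lock, flags);
		__foo_enable(f, id);
		spin_unlock_irqrestore(&f->lock, flags);
	}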