Commit f6d497cd authored by Emmanuel Grumbach, committed by Johannes Berg

iwlwifi: merge 2 functions in reclaim flow

One was just a wrapper of the second, so squash them.
Signed-off-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent f02831be
@@ -892,39 +892,45 @@ static inline void iwl_pcie_txq_progress(struct iwl_trans_pcie *trans_pcie,
 }
 
 /* Frees buffers until index _not_ inclusive */
-static int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
-                                struct sk_buff_head *skbs)
+void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
+                            struct sk_buff_head *skbs)
 {
         struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
         struct iwl_txq *txq = &trans_pcie->txq[txq_id];
+        /* n_bd is usually 256 => n_bd - 1 = 0xff */
+        int tfd_num = ssn & (txq->q.n_bd - 1);
         struct iwl_queue *q = &txq->q;
         int last_to_free;
-        int freed = 0;
 
         /* This function is not meant to release cmd queue*/
         if (WARN_ON(txq_id == trans_pcie->cmd_queue))
-                return 0;
+                return;
 
-        lockdep_assert_held(&txq->lock);
+        spin_lock(&txq->lock);
+
+        if (txq->q.read_ptr == tfd_num)
+                goto out;
+
+        IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
+                           txq_id, txq->q.read_ptr, tfd_num, ssn);
 
         /*Since we free until index _not_ inclusive, the one before index is
          * the last we will free. This one must be used */
-        last_to_free = iwl_queue_dec_wrap(index, q->n_bd);
+        last_to_free = iwl_queue_dec_wrap(tfd_num, q->n_bd);
 
-        if ((index >= q->n_bd) ||
-            (iwl_queue_used(q, last_to_free) == 0)) {
+        if (iwl_queue_used(q, last_to_free) == 0) {
                 IWL_ERR(trans,
                         "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
                         __func__, txq_id, last_to_free, q->n_bd,
                         q->write_ptr, q->read_ptr);
-                return 0;
+                goto out;
         }
 
         if (WARN_ON(!skb_queue_empty(skbs)))
-                return 0;
+                goto out;
 
         for (;
-             q->read_ptr != index;
+             q->read_ptr != tfd_num;
              q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
 
                 if (WARN_ON_ONCE(txq->entries[txq->q.read_ptr].skb == NULL))
@@ -937,32 +943,13 @@ static int iwl_pcie_txq_reclaim(struct iwl_trans *trans, int txq_id, int index,
                 iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);
 
                 iwl_pcie_txq_free_tfd(trans, txq, DMA_TO_DEVICE);
-                freed++;
         }
 
         iwl_pcie_txq_progress(trans_pcie, txq);
 
-        return freed;
-}
-
-void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
-                            struct sk_buff_head *skbs)
-{
-        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-        struct iwl_txq *txq = &trans_pcie->txq[txq_id];
-        /* n_bd is usually 256 => n_bd - 1 = 0xff */
-        int tfd_num = ssn & (txq->q.n_bd - 1);
-
-        spin_lock(&txq->lock);
-
-        if (txq->q.read_ptr != tfd_num) {
-                IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
-                                   txq_id, txq->q.read_ptr, tfd_num, ssn);
-                iwl_pcie_txq_reclaim(trans, txq_id, tfd_num, skbs);
-                if (iwl_queue_space(&txq->q) > txq->q.low_mark)
-                        iwl_wake_queue(trans, txq);
-        }
-
+        if (iwl_queue_space(&txq->q) > txq->q.low_mark)
+                iwl_wake_queue(trans, txq);
+out:
         spin_unlock(&txq->lock);
 }
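
For context (not part of the commit): the merged function relies on the TX queue ring size n_bd being a power of two, so `ssn & (q->n_bd - 1)` maps the firmware sequence number onto a slot index, and the wrap helpers step indices modulo the ring size. Below is a minimal standalone sketch of that arithmetic; `queue_inc_wrap`/`queue_dec_wrap` are hypothetical, simplified stand-ins for the driver's helpers and assume the power-of-two ring size (usually 256), they are not the kernel implementations.

#include <stdio.h>

/* Hypothetical, simplified stand-ins for the driver's ring helpers;
 * they assume n_bd is a power of two (usually 256). */
static int queue_inc_wrap(int index, int n_bd)
{
        return (index + 1) & (n_bd - 1);   /* step forward, wrapping at n_bd */
}

static int queue_dec_wrap(int index, int n_bd)
{
        return (index - 1) & (n_bd - 1);   /* step back, wrapping below 0 */
}

int main(void)
{
        int n_bd = 256;                     /* ring size, per the driver comment */
        int ssn = 0x123;                    /* example sequence number from firmware */
        int tfd_num = ssn & (n_bd - 1);     /* slot to reclaim up to: 0x23 */

        /* tfd_num is exclusive, so the last slot actually freed is one before it */
        int last_to_free = queue_dec_wrap(tfd_num, n_bd);

        printf("tfd_num=%d last_to_free=%d\n", tfd_num, last_to_free);

        /* walk a read pointer forward until it reaches tfd_num, as the reclaim loop does */
        for (int rp = 0x20; rp != tfd_num; rp = queue_inc_wrap(rp, n_bd))
                printf("freeing slot %d\n", rp);

        return 0;
}

With these numbers the sketch frees slots 32 through 34 and stops at 35 (0x23), mirroring how the reclaim loop advances read_ptr up to, but not including, tfd_num.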