Commit a0632004 authored by Johannes Berg

wifi: iwlwifi: pcie: clean up gen1/gen2 TFD unmap

This is a bit messy right now: there are functions for both, but
the gen1 function can actually deal with both gen1 and gen2, due to
the use_tfh/gen2 confusion cleaned up in the previous patch.

Fix the common paths to call the right functions and remove
handling of gen2 from the gen1 function.
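
To make the resulting call pattern concrete, here is a minimal, self-contained
sketch of the dispatch the common paths use after this change. It compiles on
its own; the struct and function names are simplified stand-ins for the
driver's real iwl_trans/iwl_txq/iwl_tfd types and unmap helpers, not the
in-tree code.

/* Hypothetical stand-ins for the driver's types, kept tiny on purpose. */
#include <stdbool.h>
#include <stdio.h>

struct tfd { unsigned int num_tbs; };

struct txq { struct tfd tfds[8]; };

struct trans_cfg { bool gen2; };
struct trans { const struct trans_cfg *trans_cfg; };

static struct tfd *get_tfd(struct txq *txq, int index)
{
	return &txq->tfds[index];
}

/* The gen2 helper takes the TFD pointer directly ... */
static void gen2_tfd_unmap(struct tfd *tfd)
{
	printf("gen2 unmap: %u TBs\n", tfd->num_tbs);
	tfd->num_tbs = 0;
}

/* ... while the gen1 helper keeps its queue-and-index interface and no
 * longer knows anything about the gen2 TFD format. */
static void gen1_tfd_unmap(struct txq *txq, int index)
{
	struct tfd *tfd = get_tfd(txq, index);

	printf("gen1 unmap: %u TBs\n", tfd->num_tbs);
	tfd->num_tbs = 0;
}

/* A common path (think TFD free or host-command completion) now picks the
 * helper explicitly instead of relying on the gen1 helper to quietly
 * handle both generations. */
static void free_tfd(const struct trans *trans, struct txq *txq, int index)
{
	if (trans->trans_cfg->gen2)
		gen2_tfd_unmap(get_tfd(txq, index));
	else
		gen1_tfd_unmap(txq, index);
}

int main(void)
{
	static const struct trans_cfg gen1 = { .gen2 = false };
	static const struct trans_cfg gen2 = { .gen2 = true };
	struct trans t1 = { .trans_cfg = &gen1 };
	struct trans t2 = { .trans_cfg = &gen2 };
	struct txq q = { .tfds[3] = { .num_tbs = 2 } };

	free_tfd(&t1, &q, 3);
	q.tfds[3].num_tbs = 2;
	free_tfd(&t2, &q, 3);
	return 0;
}

Pushing the gen1/gen2 decision to the callers keeps each unmap helper tied
to a single TFD layout, which is what allows the gen1 helpers below to take
a typed struct iwl_tfd pointer instead of void *.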
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Gregory Greenman <gregory.greenman@intel.com>
Link: https://lore.kernel.org/r/20230816104355.baf23841ec5c.I40702e94b25db05e82f935f14548316f8c6429b9@changeid
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
parent d9d115fe
@@ -1203,7 +1203,11 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
 	group_id = cmd->hdr.group_id;
 	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
 
-	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
+	if (trans->trans_cfg->gen2)
+		iwl_txq_gen2_tfd_unmap(trans, meta,
+				       iwl_txq_get_tfd(trans, txq, index));
+	else
+		iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
 
 	/* Input error checking is done when commands are added to queue. */
 	if (meta->flags & CMD_WANT_SKB) {
@@ -1340,22 +1340,12 @@ int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
 }
 
 static inline dma_addr_t iwl_txq_gen1_tfd_tb_get_addr(struct iwl_trans *trans,
-						       void *_tfd, u8 idx)
+						       struct iwl_tfd *tfd, u8 idx)
 {
-	struct iwl_tfd *tfd;
-	struct iwl_tfd_tb *tb;
+	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
 	dma_addr_t addr;
 	dma_addr_t hi_len;
 
-	if (trans->trans_cfg->gen2) {
-		struct iwl_tfh_tfd *tfh_tfd = _tfd;
-		struct iwl_tfh_tb *tfh_tb = &tfh_tfd->tbs[idx];
-
-		return (dma_addr_t)(le64_to_cpu(tfh_tb->addr));
-	}
-
-	tfd = _tfd;
-	tb = &tfd->tbs[idx];
 	addr = get_unaligned_le32(&tb->lo);
 
 	if (sizeof(dma_addr_t) <= sizeof(u32))
@@ -1376,7 +1366,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
 			    struct iwl_txq *txq, int index)
 {
 	int i, num_tbs;
-	void *tfd = iwl_txq_get_tfd(trans, txq, index);
+	struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
 
 	/* Sanity check on number of chunks */
 	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);
@@ -1408,15 +1398,7 @@ void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
 
 	meta->tbs = 0;
 
-	if (trans->trans_cfg->gen2) {
-		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;
-
-		tfd_fh->num_tbs = 0;
-	} else {
-		struct iwl_tfd *tfd_fh = (void *)tfd;
-
-		tfd_fh->num_tbs = 0;
-	}
+	tfd->num_tbs = 0;
 }
 
 #define IWL_TX_CRC_SIZE 4
@@ -1520,7 +1502,12 @@ void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
 	/* We have only q->n_window txq->entries, but we use
 	 * TFD_QUEUE_SIZE_MAX tfds
 	 */
-	iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);
+	if (trans->trans_cfg->gen2)
+		iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
+				       iwl_txq_get_tfd(trans, txq, rd_ptr));
+	else
+		iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
+				       txq, rd_ptr);
 
 	/* free SKB */
 	skb = txq->entries[idx].skb;
@@ -131,17 +131,8 @@ struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len,
 			      struct sk_buff *skb);
 #endif
 
 static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_trans *trans,
-					      void *_tfd)
+					      struct iwl_tfd *tfd)
 {
-	struct iwl_tfd *tfd;
-
-	if (trans->trans_cfg->gen2) {
-		struct iwl_tfh_tfd *tfh_tfd = _tfd;
-
-		return le16_to_cpu(tfh_tfd->num_tbs) & 0x1f;
-	}
-
-	tfd = (struct iwl_tfd *)_tfd;
 	return tfd->num_tbs & 0x1f;
 }