Commit 765952e4 authored by Rajkumar Manoharan, committed by Kalle Valo

ath10k: cleanup copy engine send completion

The physical address needed to unmap DMA ('bufferp') is already stored
in ath10k_skb_cb as 'paddr', and ath10k does not rely on the meta/transfer_id
when handling send completion (the HTC endpoint id is stored in the sk_buff
control buffer). So the unused output arguments {bufferp, nbytesp and
transfer_idp} are removed from the CE send completion handlers. This change
is needed before the shadow copy of copy engine (CE) descriptors can be
removed in a follow-up patch.
Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 1e8f86d9
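
The reasoning above hinges on the send path already carrying everything the
completion handler needs inside the sk_buff control buffer. Below is a minimal
standalone sketch of that idea; it is not the driver's actual code, and the
control-buffer layout (a 'paddr' field accessed through an ATH10K_SKB_CB()-style
helper) is an assumption taken from the commit message rather than copied from
the source tree.

#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

/* Hypothetical control-buffer layout for illustration only; the real driver
 * defines its own struct ath10k_skb_cb carrying (at least) the DMA address
 * mentioned in the commit message. */
struct example_skb_cb {
        dma_addr_t paddr;       /* recorded when the buffer was DMA-mapped for TX */
};

#define EXAMPLE_SKB_CB(skb) ((struct example_skb_cb *)&(skb)->cb[0])

/* On send completion the handler no longer needs the CE descriptor's buffer
 * address ('bufferp'): it unmaps using the address remembered in the control
 * buffer and the length carried by the skb itself. */
static void example_tx_unmap(struct device *dev, struct sk_buff *skb)
{
        dma_unmap_single(dev, EXAMPLE_SKB_CB(skb)->paddr, skb->len,
                         DMA_TO_DEVICE);
}
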
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -578,17 +578,13 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
  * The caller takes responsibility for any necessary locking.
  */
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-                                         void **per_transfer_contextp,
-                                         u32 *bufferp,
-                                         unsigned int *nbytesp,
-                                         unsigned int *transfer_idp)
+                                         void **per_transfer_contextp)
 {
         struct ath10k_ce_ring *src_ring = ce_state->src_ring;
         u32 ctrl_addr = ce_state->ctrl_addr;
         struct ath10k *ar = ce_state->ar;
         unsigned int nentries_mask = src_ring->nentries_mask;
         unsigned int sw_index = src_ring->sw_index;
-        struct ce_desc *sdesc, *sbase;
         unsigned int read_index;
 
         if (src_ring->hw_index == sw_index) {
@@ -613,15 +609,6 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
         if (read_index == sw_index)
                 return -EIO;
 
-        sbase = src_ring->base_addr_owner_space;
-        sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
-
-        /* Return data from completed source descriptor */
-        *bufferp = __le32_to_cpu(sdesc->addr);
-        *nbytesp = __le16_to_cpu(sdesc->nbytes);
-        *transfer_idp = MS(__le16_to_cpu(sdesc->flags),
-                           CE_DESC_FLAGS_META_DATA);
-
         if (per_transfer_contextp)
                 *per_transfer_contextp =
                         src_ring->per_transfer_context[sw_index];
@@ -696,10 +683,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
 }
 
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-                                  void **per_transfer_contextp,
-                                  u32 *bufferp,
-                                  unsigned int *nbytesp,
-                                  unsigned int *transfer_idp)
+                                  void **per_transfer_contextp)
 {
         struct ath10k *ar = ce_state->ar;
         struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
@@ -707,9 +691,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
         spin_lock_bh(&ar_pci->ce_lock);
         ret = ath10k_ce_completed_send_next_nolock(ce_state,
-                                                   per_transfer_contextp,
-                                                   bufferp, nbytesp,
-                                                   transfer_idp);
+                                                   per_transfer_contextp);
         spin_unlock_bh(&ar_pci->ce_lock);
 
         return ret;
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -192,16 +192,10 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
  * Pops 1 completed send buffer from Source ring.
  */
 int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
-                                  void **per_transfer_contextp,
-                                  u32 *bufferp,
-                                  unsigned int *nbytesp,
-                                  unsigned int *transfer_idp);
+                                  void **per_transfer_contextp);
 
 int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
-                                         void **per_transfer_contextp,
-                                         u32 *bufferp,
-                                         unsigned int *nbytesp,
-                                         unsigned int *transfer_idp);
+                                         void **per_transfer_contextp);
 
 /*==================CE Engine Initialization=======================*/
--- a/drivers/net/wireless/ath/ath10k/pci.c
+++ b/drivers/net/wireless/ath/ath10k/pci.c
@@ -910,9 +910,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
                 goto done;
 
         i = 0;
-        while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-                                                    &completed_nbytes,
-                                                    &id) != 0) {
+        while (ath10k_ce_completed_send_next_nolock(ce_diag,
+                                                    NULL) != 0) {
                 mdelay(1);
 
                 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
                         ret = -EBUSY;
@@ -1073,9 +1072,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
                 goto done;
 
         i = 0;
-        while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
-                                                    &completed_nbytes,
-                                                    &id) != 0) {
+        while (ath10k_ce_completed_send_next_nolock(ce_diag,
+                                                    NULL) != 0) {
                 mdelay(1);
 
                 if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
@@ -1139,13 +1137,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
         struct ath10k *ar = ce_state->ar;
         struct sk_buff_head list;
         struct sk_buff *skb;
-        u32 ce_data;
-        unsigned int nbytes;
-        unsigned int transfer_id;
 
         __skb_queue_head_init(&list);
-        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-                                             &nbytes, &transfer_id) == 0) {
+        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                 /* no need to call tx completion for NULL pointers */
                 if (skb == NULL)
                         continue;
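
For the HTC TX path above, the endpoint a completed frame belongs to does not
have to come back from the copy engine either: as the commit message notes, the
HTC endpoint id is stashed in the sk_buff control buffer when the frame is
queued, and the byte count is simply skb->len. A rough sketch of that lookup
follows, with a hypothetical 'eid' field standing in for the driver's real
control-buffer layout.

#include <linux/printk.h>
#include <linux/skbuff.h>

/* Hypothetical control buffer for illustration; the driver's real
 * ath10k_skb_cb holds the HTC endpoint id in a field of its own. */
struct example_htc_cb {
        u8 eid;                 /* HTC endpoint id, set when the frame was queued */
};

#define EXAMPLE_HTC_CB(skb) ((struct example_htc_cb *)&(skb)->cb[0])

/* Everything the completion handler used to receive via 'transfer_idp' and
 * 'nbytesp' is recoverable from the skb alone. */
static void example_notify_tx_completion(struct sk_buff *skb)
{
        u8 eid = EXAMPLE_HTC_CB(skb)->eid;

        pr_debug("htc tx complete: eid %u, %u bytes\n", eid, skb->len);
}
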
@@ -1215,12 +1209,8 @@ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
 {
         struct ath10k *ar = ce_state->ar;
         struct sk_buff *skb;
-        u32 ce_data;
-        unsigned int nbytes;
-        unsigned int transfer_id;
 
-        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
-                                             &nbytes, &transfer_id) == 0) {
+        while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
                 /* no need to call tx completion for NULL pointers */
                 if (!skb)
                         continue;
@@ -1796,12 +1786,8 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
 static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
 {
         struct bmi_xfer *xfer;
-        u32 ce_data;
-        unsigned int nbytes;
-        unsigned int transfer_id;
 
-        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
-                                          &nbytes, &transfer_id))
+        if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
                 return;
 
         xfer->tx_done = true;