Commit deba1b9e authored by Rajkumar Manoharan, committed by Kalle Valo

ath10k: unify rx processing in napi_poll

With the current NAPI implementation, the NAPI poll routine can deliver
more frames to the net core than the allotted budget, which may trigger
a warning in napi_poll. The remaining quota is not accounted for while
processing A-MSDUs from the rx_in_ord_ind and rx_ind queues, and adding
num_msdus only after delivery cannot prevent handing excess frames to
the net core. With this change, all A-MSDUs from both the in_ord_ind
and rx_ind queues are processed and enqueued onto a common skb list
instead of being delivered to mac80211 directly. The MSDUs are later
dequeued from the common queue and delivered depending on quota
availability. This also simplifies the rx processing in the napi poll
routine.
Signed-off-by: Rajkumar Manoharan <rmanohar@qti.qualcomm.com>
Signed-off-by: Kalle Valo <kvalo@qca.qualcomm.com>
parent 07ffb449
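
To make the scheme in the commit message concrete, here is a minimal user-space sketch of the pattern it describes: every reassembled MSDU is parked on one common FIFO, and delivery toward the stack only dequeues up to the remaining NAPI budget. The queue, frame type, and the deliver_frame()/deliver_msdus() names below are hypothetical stand-ins for illustration, not ath10k or mac80211 APIs.

#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for an skb: just an id in this model. */
struct frame {
	int id;
	struct frame *next;
};

/* Minimal FIFO playing the role of the common rx_msdus_q. */
struct frame_queue {
	struct frame *head;
	struct frame *tail;
};

static void enqueue(struct frame_queue *q, struct frame *f)
{
	f->next = NULL;
	if (q->tail)
		q->tail->next = f;
	else
		q->head = f;
	q->tail = f;
}

static struct frame *dequeue(struct frame_queue *q)
{
	struct frame *f = q->head;

	if (f) {
		q->head = f->next;
		if (!q->head)
			q->tail = NULL;
	}
	return f;
}

/* Stand-in for handing one frame to the network stack. */
static void deliver_frame(struct frame *f)
{
	printf("delivered frame %d\n", f->id);
	free(f);
}

/* Same shape as the new delivery helper: consume frames from the common
 * queue only while quota is below the budget, and report the quota used
 * so the caller can decide whether the poll needs to be rescheduled.
 */
static int deliver_msdus(struct frame_queue *q, int quota, int budget)
{
	struct frame *f;

	while (quota < budget) {
		f = dequeue(q);
		if (!f)
			break;
		deliver_frame(f);
		quota++;
	}
	return quota;
}

int main(void)
{
	struct frame_queue q = { NULL, NULL };
	struct frame *f;
	int i, quota;

	/* Pretend 100 MSDUs were reassembled from rx_ind/in_ord_ind events. */
	for (i = 0; i < 100; i++) {
		f = malloc(sizeof(*f));
		f->id = i;
		enqueue(&q, f);
	}

	/* Only 64 (the modelled budget) are delivered in this poll cycle;
	 * the rest stay queued for the next poll.
	 */
	quota = deliver_msdus(&q, 0, 64);
	printf("quota used: %d, first leftover frame: %d\n",
	       quota, q.head ? q.head->id : -1);

	/* Drain the leftovers so the model exits cleanly. */
	while ((f = dequeue(&q)))
		free(f);
	return 0;
}
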
@@ -67,7 +67,6 @@
 /* NAPI poll budget */
 #define ATH10K_NAPI_BUDGET 64
-#define ATH10K_NAPI_QUOTA_LIMIT 60

 /* SMBIOS type containing Board Data File Name Extension */
 #define ATH10K_SMBIOS_BDF_EXT_TYPE 0xF8
...
@@ -1695,7 +1695,7 @@ struct ath10k_htt {
 	/* This is used to group tx/rx completions separately and process them
	 * in batches to reduce cache stalls
	 */
-	struct sk_buff_head rx_compl_q;
+	struct sk_buff_head rx_msdus_q;
 	struct sk_buff_head rx_in_ord_compl_q;
 	struct sk_buff_head tx_fetch_ind_q;
...
@@ -227,7 +227,7 @@ void ath10k_htt_rx_free(struct ath10k_htt *htt)
 {
 	del_timer_sync(&htt->rx_ring.refill_retry_timer);

-	skb_queue_purge(&htt->rx_compl_q);
+	skb_queue_purge(&htt->rx_msdus_q);
 	skb_queue_purge(&htt->rx_in_ord_compl_q);
 	skb_queue_purge(&htt->tx_fetch_ind_q);
@@ -515,7 +515,7 @@ int ath10k_htt_rx_alloc(struct ath10k_htt *htt)
 	htt->rx_ring.sw_rd_idx.msdu_payld = 0;
 	hash_init(htt->rx_ring.skb_table);

-	skb_queue_head_init(&htt->rx_compl_q);
+	skb_queue_head_init(&htt->rx_msdus_q);
 	skb_queue_head_init(&htt->rx_in_ord_compl_q);
 	skb_queue_head_init(&htt->tx_fetch_ind_q);
 	atomic_set(&htt->num_mpdus_ready, 0);
@@ -974,16 +974,25 @@ static char *ath10k_get_tid(struct ieee80211_hdr *hdr, char *out, size_t size)
 	return out;
 }

-static void ath10k_process_rx(struct ath10k *ar,
-			      struct ieee80211_rx_status *rx_status,
-			      struct sk_buff *skb)
+static void ath10k_htt_rx_h_queue_msdu(struct ath10k *ar,
+				       struct ieee80211_rx_status *rx_status,
+				       struct sk_buff *skb)
+{
+	struct ieee80211_rx_status *status;
+
+	status = IEEE80211_SKB_RXCB(skb);
+	*status = *rx_status;
+
+	__skb_queue_tail(&ar->htt.rx_msdus_q, skb);
+}
+
+static void ath10k_process_rx(struct ath10k *ar, struct sk_buff *skb)
 {
 	struct ieee80211_rx_status *status;
 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
 	char tid[32];

 	status = IEEE80211_SKB_RXCB(skb);
-	*status = *rx_status;

 	ath10k_dbg(ar, ATH10K_DBG_DATA,
 		   "rx skb %pK len %u peer %pM %s %s sn %u %s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
@@ -1517,7 +1526,7 @@ static void ath10k_htt_rx_h_mpdu(struct ath10k *ar,
 	}
 }

-static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
+static void ath10k_htt_rx_h_enqueue(struct ath10k *ar,
 				    struct sk_buff_head *amsdu,
 				    struct ieee80211_rx_status *status)
 {
@@ -1540,7 +1549,7 @@ static void ath10k_htt_rx_h_deliver(struct ath10k *ar,
 			status->flag |= RX_FLAG_ALLOW_SAME_PN;
 		}

-		ath10k_process_rx(ar, status, msdu);
+		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
 	}
 }
@@ -1652,7 +1661,7 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 	struct ath10k *ar = htt->ar;
 	struct ieee80211_rx_status *rx_status = &htt->rx_status;
 	struct sk_buff_head amsdu;
-	int ret, num_msdus;
+	int ret;

 	__skb_queue_head_init(&amsdu);
@@ -1674,7 +1683,6 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 		return ret;
 	}

-	num_msdus = skb_queue_len(&amsdu);
 	ath10k_htt_rx_h_ppdu(ar, &amsdu, rx_status, 0xffff);

 	/* only for ret = 1 indicates chained msdus */
@@ -1683,9 +1691,9 @@ static int ath10k_htt_rx_handle_amsdu(struct ath10k_htt *htt)
 	ath10k_htt_rx_h_filter(ar, &amsdu, rx_status);
 	ath10k_htt_rx_h_mpdu(ar, &amsdu, rx_status, true);
-	ath10k_htt_rx_h_deliver(ar, &amsdu, rx_status);
+	ath10k_htt_rx_h_enqueue(ar, &amsdu, rx_status);

-	return num_msdus;
+	return 0;
 }

 static void ath10k_htt_rx_proc_rx_ind(struct ath10k_htt *htt,
@@ -1893,7 +1901,7 @@ static void ath10k_htt_rx_h_rx_offload_prot(struct ieee80211_rx_status *status,
 			RX_FLAG_MMIC_STRIPPED;
 }

-static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
-				      struct sk_buff_head *list)
+static void ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
+				       struct sk_buff_head *list)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -1901,7 +1909,6 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
 	struct htt_rx_offload_msdu *rx;
 	struct sk_buff *msdu;
 	size_t offset;
-	int num_msdu = 0;

 	while ((msdu = __skb_dequeue(list))) {
 		/* Offloaded frames don't have Rx descriptor. Instead they have
@@ -1940,10 +1947,8 @@ static int ath10k_htt_rx_h_rx_offload(struct ath10k *ar,
 		ath10k_htt_rx_h_rx_offload_prot(status, msdu);
 		ath10k_htt_rx_h_channel(ar, status, NULL, rx->vdev_id);
-		ath10k_process_rx(ar, status, msdu);
-		num_msdu++;
+		ath10k_htt_rx_h_queue_msdu(ar, status, msdu);
 	}
-
-	return num_msdu;
 }

 static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
@@ -1959,7 +1964,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 	u8 tid;
 	bool offload;
 	bool frag;
-	int ret, num_msdus = 0;
+	int ret;

 	lockdep_assert_held(&htt->rx_ring.lock);
@@ -2001,7 +2006,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 	 * separately.
 	 */
 	if (offload)
-		num_msdus = ath10k_htt_rx_h_rx_offload(ar, &list);
+		ath10k_htt_rx_h_rx_offload(ar, &list);

 	while (!skb_queue_empty(&list)) {
 		__skb_queue_head_init(&amsdu);
@@ -2014,11 +2019,10 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 			 * better to report something than nothing though. This
 			 * should still give an idea about rx rate to the user.
 			 */
-			num_msdus += skb_queue_len(&amsdu);
 			ath10k_htt_rx_h_ppdu(ar, &amsdu, status, vdev_id);
 			ath10k_htt_rx_h_filter(ar, &amsdu, status);
 			ath10k_htt_rx_h_mpdu(ar, &amsdu, status, false);
-			ath10k_htt_rx_h_deliver(ar, &amsdu, status);
+			ath10k_htt_rx_h_enqueue(ar, &amsdu, status);
 			break;
 		case -EAGAIN:
 			/* fall through */
@@ -2030,7 +2034,7 @@ static int ath10k_htt_rx_in_ord_ind(struct ath10k *ar, struct sk_buff *skb)
 			return -EIO;
 		}
 	}

-	return num_msdus;
+	return ret;
 }

 static void ath10k_htt_rx_tx_fetch_resp_id_confirm(struct ath10k *ar,
@@ -2631,6 +2635,24 @@ void ath10k_htt_rx_pktlog_completion_handler(struct ath10k *ar,
 }
 EXPORT_SYMBOL(ath10k_htt_rx_pktlog_completion_handler);

+static int ath10k_htt_rx_deliver_msdu(struct ath10k *ar, int quota, int budget)
+{
+	struct sk_buff *skb;
+
+	while (quota < budget) {
+		if (skb_queue_empty(&ar->htt.rx_msdus_q))
+			break;
+
+		skb = __skb_dequeue(&ar->htt.rx_msdus_q);
+		if (!skb)
+			break;
+		ath10k_process_rx(ar, skb);
+		quota++;
+	}
+
+	return quota;
+}
+
 int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 {
 	struct ath10k_htt *htt = &ar->htt;
@@ -2638,63 +2660,44 @@ int ath10k_htt_txrx_compl_task(struct ath10k *ar, int budget)
 	struct sk_buff_head tx_ind_q;
 	struct sk_buff *skb;
 	unsigned long flags;
-	int quota = 0, done, num_rx_msdus;
+	int quota = 0, done, ret;
 	bool resched_napi = false;

 	__skb_queue_head_init(&tx_ind_q);

-	/* Since in-ord-ind can deliver more than 1 A-MSDU in single event,
-	 * process it first to utilize full available quota.
+	/* Process pending frames before dequeuing more data
+	 * from hardware.
 	 */
-	while (quota < budget) {
-		if (skb_queue_empty(&htt->rx_in_ord_compl_q))
-			break;
-
-		skb = __skb_dequeue(&htt->rx_in_ord_compl_q);
-		if (!skb) {
-			resched_napi = true;
-			goto exit;
-		}
+	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+	if (quota == budget) {
+		resched_napi = true;
+		goto exit;
+	}

+	while ((skb = __skb_dequeue(&htt->rx_in_ord_compl_q))) {
 		spin_lock_bh(&htt->rx_ring.lock);
-		num_rx_msdus = ath10k_htt_rx_in_ord_ind(ar, skb);
+		ret = ath10k_htt_rx_in_ord_ind(ar, skb);
 		spin_unlock_bh(&htt->rx_ring.lock);
-		if (num_rx_msdus < 0) {
-			resched_napi = true;
-			goto exit;
-		}

 		dev_kfree_skb_any(skb);
-		if (num_rx_msdus > 0)
-			quota += num_rx_msdus;
-
-		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
-		    !skb_queue_empty(&htt->rx_in_ord_compl_q)) {
+		if (ret == -EIO) {
 			resched_napi = true;
 			goto exit;
 		}
 	}

-	while (quota < budget) {
-		/* no more data to receive */
-		if (!atomic_read(&htt->num_mpdus_ready))
-			break;
-
-		num_rx_msdus = ath10k_htt_rx_handle_amsdu(htt);
-		if (num_rx_msdus < 0) {
+	while (atomic_read(&htt->num_mpdus_ready)) {
+		ret = ath10k_htt_rx_handle_amsdu(htt);
+		if (ret == -EIO) {
 			resched_napi = true;
 			goto exit;
 		}
-
-		quota += num_rx_msdus;
 		atomic_dec(&htt->num_mpdus_ready);
-
-		if ((quota > ATH10K_NAPI_QUOTA_LIMIT) &&
-		    atomic_read(&htt->num_mpdus_ready)) {
-			resched_napi = true;
-			goto exit;
-		}
 	}

+	/* Deliver received data after processing data from hardware */
+	quota = ath10k_htt_rx_deliver_msdu(ar, quota, budget);
+
 	/* From NAPI documentation:
 	 *  The napi poll() function may also process TX completions, in which
 	 *  case if it processes the entire TX ring then it should count that
...
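
For context on the warning mentioned in the commit message: the NAPI core expects poll() to report at most the budget it was given and complains when a driver claims more work than that. The toy model below only illustrates that caller-side contract (the real check lives in the kernel's net_rx_action()); the poll_uncapped()/poll_capped() callbacks are hypothetical stand-ins.

#include <stdio.h>

/* Toy poll callback that delivers everything it has, ignoring the budget,
 * as the driver effectively did when one in-order indication carried many
 * A-MSDUs.
 */
static int poll_uncapped(int budget)
{
	(void)budget;
	return 90;	/* e.g. three A-MSDUs of 30 MSDUs each */
}

/* Toy poll callback that caps delivery at the budget and leaves the rest
 * queued, as the patched driver does.
 */
static int poll_capped(int budget)
{
	int pending = 90;

	return pending < budget ? pending : budget;
}

/* Model of the caller-side rule: work done must never exceed the budget. */
static void run_poll(const char *name, int (*poll)(int), int budget)
{
	int work = poll(budget);

	printf("%s: budget %d, work %d%s\n", name, budget, work,
	       work > budget ? " -> would trip the NAPI warning" : "");
}

int main(void)
{
	run_poll("uncapped", poll_uncapped, 64);
	run_poll("capped", poll_capped, 64);
	return 0;
}
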