Commit 4b6dfc75 authored by Dinesh Karthikeyan, committed by Kalle Valo

wifi: ath12k: Support Transmit Scheduler stats

Add support to request transmit scheduler stats from firmware through
HTT stats type 4. These stats give information such as pdev scheduling
stats per transmit queue, transmit queue commands posted and reaped,
scheduler order, scheduler ineligibility and supercycle triggers.

Sample output:
-------------
echo 4 > /sys/kernel/debug/ath12k/pci-0000\:06\:00.0/mac0/htt_stats_type
cat /sys/kernel/debug/ath12k/pci-0000\:06\:00.0/mac0/htt_stats
HTT_STATS_TX_SCHED_CMN_TLV:
mac_id = 0
current_timestamp = 952546828

HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:
mac_id = 0
txq_id = 14
sched_policy = 2
.....

HTT_SCHED_TXQ_CMD_POSTED_TLV:
sched_cmd_posted =  0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0,
10:0, 11:0, 12:0

HTT_SCHED_TXQ_CMD_REAPED_TLV:
sched_cmd_reaped =  0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0,
10:0, 11:0, 12:0

HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:
sched_order_su =  0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0, 10:0,
11:0, 12:0, 13:0, 14:0, 15:0, 16:0, 17:0, 18:0, 19:0

HTT_SCHED_TXQ_SCHED_INELIGIBILITY:
sched_ineligibility =  0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0, 7:0, 8:0, 9:0,
10:0, 11:0, 12:0, 13:0, 14:0, 15:0, 16:0, 17:0, 18:0, 19:0, 20:0, 21:0,
22:0, 23:0, 24:0, 25:0, 26:0, 27:0, 28:0, 29:0, 30:0, 31:0, 32:0, 33:0,
34:0, 35:0

HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:
supercycle_triggers =  0:0, 1:0, 2:0, 3:0, 4:0, 5:0, 6:0
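
Editorial note, not part of the patch: the "index:value" arrays above come from
flexible-array TLVs whose element count is not fixed; the new printers derive
it from the TLV length in bytes (tag_len >> 2, i.e. one 32-bit counter per
word) and hand the array to print_array_to_buf(). The helper below is a
minimal standalone sketch of that sizing and printing idea only; the function
name is made up and firmware byte-order handling is omitted.

#include <inttypes.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static void sketch_print_counter_tlv(const char *name, const void *tag_buf,
				     size_t tag_len)
{
	const uint32_t *counters = tag_buf;
	size_t num_elements = tag_len >> 2;	/* bytes -> 32-bit words */
	size_t i;

	printf("%s = ", name);
	for (i = 0; i < num_elements; i++)
		printf(" %zu:%" PRIu32 ",", i, counters[i]);
	printf("\n");
}

In the driver itself the counts for sched_order_su and supercycle_triggers are
additionally clamped with min_t() against ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG
and ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX, which is why those arrays print at
most 20 and 7 entries respectively.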

Tested-on: QCN9274 hw2.0 PCI WLAN.WBE.1.0.1-00029-QCAHKSWPL_SILICONZ-1
Tested-on: WCN7850 hw2.0 PCI WLAN.HMT.1.0-03427-QCAHMTSWPL_V1.0_V2.0_SILICONZ-1.15378.4
Signed-off-by: Dinesh Karthikeyan <quic_dinek@quicinc.com>
Signed-off-by: Roopni Devanathan <quic_rdevanat@quicinc.com>
Acked-by: Jeff Johnson <quic_jjohnson@quicinc.com>
Signed-off-by: Kalle Valo <quic_kvalo@quicinc.com>
Link: https://patch.msgid.link/20240625042217.1303332-2-quic_rdevanat@quicinc.com
parent a9f2976f
debugfs_htt_stats.c
@@ -386,6 +386,216 @@ htt_print_pdev_ctrl_path_tx_stats_tlv(const void *tag_buf, u16 tag_len,
	stats_req->buf_len = len;
}

static void
ath12k_htt_print_stats_tx_sched_cmn_tlv(const void *tag_buf,
					u16 tag_len,
					struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_stats_tx_sched_cmn_tlv *htt_stats_buf = tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u32 mac_id_word;

	if (tag_len < sizeof(*htt_stats_buf))
		return;

	mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
	len += scnprintf(buf + len, buf_len - len, "HTT_STATS_TX_SCHED_CMN_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
			 u32_get_bits(mac_id_word, ATH12K_HTT_STATS_MAC_ID));
	len += scnprintf(buf + len, buf_len - len, "current_timestamp = %u\n\n",
			 le32_to_cpu(htt_stats_buf->current_timestamp));

	stats_req->buf_len = len;
}

static void
ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(const void *tag_buf,
						 u16 tag_len,
						 struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv *htt_stats_buf = tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u32 mac_id_word;

	if (tag_len < sizeof(*htt_stats_buf))
		return;

	mac_id_word = __le32_to_cpu(htt_stats_buf->mac_id__word);
	len += scnprintf(buf + len, buf_len - len,
			 "HTT_TX_PDEV_STATS_SCHED_PER_TXQ_TLV:\n");
	len += scnprintf(buf + len, buf_len - len, "mac_id = %u\n",
			 u32_get_bits(mac_id_word,
				      ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID));
	len += scnprintf(buf + len, buf_len - len, "txq_id = %u\n",
			 u32_get_bits(mac_id_word,
				      ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID));
	len += scnprintf(buf + len, buf_len - len, "sched_policy = %u\n",
			 le32_to_cpu(htt_stats_buf->sched_policy));
	len += scnprintf(buf + len, buf_len - len,
			 "last_sched_cmd_posted_timestamp = %u\n",
			 le32_to_cpu(htt_stats_buf->last_sched_cmd_posted_timestamp));
	len += scnprintf(buf + len, buf_len - len,
			 "last_sched_cmd_compl_timestamp = %u\n",
			 le32_to_cpu(htt_stats_buf->last_sched_cmd_compl_timestamp));
	len += scnprintf(buf + len, buf_len - len, "sched_2_tac_lwm_count = %u\n",
			 le32_to_cpu(htt_stats_buf->sched_2_tac_lwm_count));
	len += scnprintf(buf + len, buf_len - len, "sched_2_tac_ring_full = %u\n",
			 le32_to_cpu(htt_stats_buf->sched_2_tac_ring_full));
	len += scnprintf(buf + len, buf_len - len, "sched_cmd_post_failure = %u\n",
			 le32_to_cpu(htt_stats_buf->sched_cmd_post_failure));
	len += scnprintf(buf + len, buf_len - len, "num_active_tids = %u\n",
			 le32_to_cpu(htt_stats_buf->num_active_tids));
	len += scnprintf(buf + len, buf_len - len, "num_ps_schedules = %u\n",
			 le32_to_cpu(htt_stats_buf->num_ps_schedules));
	len += scnprintf(buf + len, buf_len - len, "sched_cmds_pending = %u\n",
			 le32_to_cpu(htt_stats_buf->sched_cmds_pending));
	len += scnprintf(buf + len, buf_len - len, "num_tid_register = %u\n",
			 le32_to_cpu(htt_stats_buf->num_tid_register));
	len += scnprintf(buf + len, buf_len - len, "num_tid_unregister = %u\n",
			 le32_to_cpu(htt_stats_buf->num_tid_unregister));
	len += scnprintf(buf + len, buf_len - len, "num_qstats_queried = %u\n",
			 le32_to_cpu(htt_stats_buf->num_qstats_queried));
	len += scnprintf(buf + len, buf_len - len, "qstats_update_pending = %u\n",
			 le32_to_cpu(htt_stats_buf->qstats_update_pending));
	len += scnprintf(buf + len, buf_len - len, "last_qstats_query_timestamp = %u\n",
			 le32_to_cpu(htt_stats_buf->last_qstats_query_timestamp));
	len += scnprintf(buf + len, buf_len - len, "num_tqm_cmdq_full = %u\n",
			 le32_to_cpu(htt_stats_buf->num_tqm_cmdq_full));
	len += scnprintf(buf + len, buf_len - len, "num_de_sched_algo_trigger = %u\n",
			 le32_to_cpu(htt_stats_buf->num_de_sched_algo_trigger));
	len += scnprintf(buf + len, buf_len - len, "num_rt_sched_algo_trigger = %u\n",
			 le32_to_cpu(htt_stats_buf->num_rt_sched_algo_trigger));
	len += scnprintf(buf + len, buf_len - len, "num_tqm_sched_algo_trigger = %u\n",
			 le32_to_cpu(htt_stats_buf->num_tqm_sched_algo_trigger));
	len += scnprintf(buf + len, buf_len - len, "notify_sched = %u\n",
			 le32_to_cpu(htt_stats_buf->notify_sched));
	len += scnprintf(buf + len, buf_len - len, "dur_based_sendn_term = %u\n",
			 le32_to_cpu(htt_stats_buf->dur_based_sendn_term));
	len += scnprintf(buf + len, buf_len - len, "su_notify2_sched = %u\n",
			 le32_to_cpu(htt_stats_buf->su_notify2_sched));
	len += scnprintf(buf + len, buf_len - len, "su_optimal_queued_msdus_sched = %u\n",
			 le32_to_cpu(htt_stats_buf->su_optimal_queued_msdus_sched));
	len += scnprintf(buf + len, buf_len - len, "su_delay_timeout_sched = %u\n",
			 le32_to_cpu(htt_stats_buf->su_delay_timeout_sched));
	len += scnprintf(buf + len, buf_len - len, "su_min_txtime_sched_delay = %u\n",
			 le32_to_cpu(htt_stats_buf->su_min_txtime_sched_delay));
	len += scnprintf(buf + len, buf_len - len, "su_no_delay = %u\n",
			 le32_to_cpu(htt_stats_buf->su_no_delay));
	len += scnprintf(buf + len, buf_len - len, "num_supercycles = %u\n",
			 le32_to_cpu(htt_stats_buf->num_supercycles));
	len += scnprintf(buf + len, buf_len - len, "num_subcycles_with_sort = %u\n",
			 le32_to_cpu(htt_stats_buf->num_subcycles_with_sort));
	len += scnprintf(buf + len, buf_len - len, "num_subcycles_no_sort = %u\n\n",
			 le32_to_cpu(htt_stats_buf->num_subcycles_no_sort));

	stats_req->buf_len = len;
}

static void
ath12k_htt_print_sched_txq_cmd_posted_tlv(const void *tag_buf,
					  u16 tag_len,
					  struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_sched_txq_cmd_posted_tlv *htt_stats_buf = tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u16 num_elements = tag_len >> 2;

	len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_POSTED_TLV:\n");
	len += print_array_to_buf(buf, len, "sched_cmd_posted",
				  htt_stats_buf->sched_cmd_posted, num_elements, "\n\n");

	stats_req->buf_len = len;
}

static void
ath12k_htt_print_sched_txq_cmd_reaped_tlv(const void *tag_buf,
					  u16 tag_len,
					  struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_sched_txq_cmd_reaped_tlv *htt_stats_buf = tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u16 num_elements = tag_len >> 2;

	len += scnprintf(buf + len, buf_len - len, "HTT_SCHED_TXQ_CMD_REAPED_TLV:\n");
	len += print_array_to_buf(buf, len, "sched_cmd_reaped",
				  htt_stats_buf->sched_cmd_reaped, num_elements, "\n\n");

	stats_req->buf_len = len;
}

static void
ath12k_htt_print_sched_txq_sched_order_su_tlv(const void *tag_buf,
					      u16 tag_len,
					      struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_sched_txq_sched_order_su_tlv *htt_stats_buf = tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u32 sched_order_su_num_entries = min_t(u32, (tag_len >> 2),
					       ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG);

	len += scnprintf(buf + len, buf_len - len,
			 "HTT_SCHED_TXQ_SCHED_ORDER_SU_TLV:\n");
	len += print_array_to_buf(buf, len, "sched_order_su",
				  htt_stats_buf->sched_order_su,
				  sched_order_su_num_entries, "\n\n");

	stats_req->buf_len = len;
}

static void
ath12k_htt_print_sched_txq_sched_ineligibility_tlv(const void *tag_buf,
						   u16 tag_len,
						   struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_sched_txq_sched_ineligibility_tlv *htt_stats_buf =
		tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u32 sched_ineligibility_num_entries = tag_len >> 2;

	len += scnprintf(buf + len, buf_len - len,
			 "HTT_SCHED_TXQ_SCHED_INELIGIBILITY:\n");
	len += print_array_to_buf(buf, len, "sched_ineligibility",
				  htt_stats_buf->sched_ineligibility,
				  sched_ineligibility_num_entries, "\n\n");

	stats_req->buf_len = len;
}

static void
ath12k_htt_print_sched_txq_supercycle_trigger_tlv(const void *tag_buf,
						  u16 tag_len,
						  struct debug_htt_stats_req *stats_req)
{
	const struct ath12k_htt_sched_txq_supercycle_triggers_tlv *htt_stats_buf =
		tag_buf;
	u8 *buf = stats_req->buf;
	u32 len = stats_req->buf_len;
	u32 buf_len = ATH12K_HTT_STATS_BUF_SIZE;
	u16 num_elems = min_t(u16, (tag_len >> 2),
			      ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX);

	len += scnprintf(buf + len, buf_len - len,
			 "HTT_SCHED_TXQ_SUPERCYCLE_TRIGGER:\n");
	len += print_array_to_buf(buf, len, "supercycle_triggers",
				  htt_stats_buf->supercycle_triggers, num_elems, "\n\n");

	stats_req->buf_len = len;
}

static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
					  u16 tag, u16 len, const void *tag_buf,
					  void *user_data)

@@ -414,6 +624,29 @@ static int ath12k_dbg_htt_ext_stats_parse(struct ath12k_base *ab,
	case HTT_STATS_MU_PPDU_DIST_TAG:
		htt_print_tx_pdev_mu_ppdu_dist_stats_tlv(tag_buf, len, stats_req);
		break;
	case HTT_STATS_TX_SCHED_CMN_TAG:
		ath12k_htt_print_stats_tx_sched_cmn_tlv(tag_buf, len, stats_req);
		break;
	case HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG:
		ath12k_htt_print_tx_pdev_stats_sched_per_txq_tlv(tag_buf, len, stats_req);
		break;
	case HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG:
		ath12k_htt_print_sched_txq_cmd_posted_tlv(tag_buf, len, stats_req);
		break;
	case HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG:
		ath12k_htt_print_sched_txq_cmd_reaped_tlv(tag_buf, len, stats_req);
		break;
	case HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG:
		ath12k_htt_print_sched_txq_sched_order_su_tlv(tag_buf, len, stats_req);
		break;
	case HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG:
		ath12k_htt_print_sched_txq_sched_ineligibility_tlv(tag_buf, len,
								   stats_req);
		break;
	case HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG:
		ath12k_htt_print_sched_txq_supercycle_trigger_tlv(tag_buf, len,
								  stats_req);
		break;
	default:
		break;
	}

debugfs_htt_stats.h
@@ -125,6 +125,7 @@ struct ath12k_htt_extd_stats_msg {

enum ath12k_dbg_htt_ext_stats_type {
	ATH12K_DBG_HTT_EXT_STATS_RESET = 0,
	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX = 1,
	ATH12K_DBG_HTT_EXT_STATS_PDEV_TX_SCHED = 4,

	/* keep this last */
	ATH12K_DBG_HTT_NUM_EXT_STATS,

@@ -135,7 +136,14 @@ enum ath12k_dbg_htt_tlv_tag {
	HTT_STATS_TX_PDEV_UNDERRUN_TAG = 1,
	HTT_STATS_TX_PDEV_SIFS_TAG = 2,
	HTT_STATS_TX_PDEV_FLUSH_TAG = 3,
	HTT_STATS_TX_PDEV_SCHEDULER_TXQ_STATS_TAG = 36,
	HTT_STATS_TX_SCHED_CMN_TAG = 37,
	HTT_STATS_SCHED_TXQ_CMD_POSTED_TAG = 39,
	HTT_STATS_SCHED_TXQ_CMD_REAPED_TAG = 44,
	HTT_STATS_TX_PDEV_SIFS_HIST_TAG = 67,
	HTT_STATS_SCHED_TXQ_SCHED_ORDER_SU_TAG = 86,
	HTT_STATS_SCHED_TXQ_SCHED_INELIGIBILITY_TAG = 87,
	HTT_STATS_SCHED_TXQ_SUPERCYCLE_TRIGGER_TAG = 100,
	HTT_STATS_PDEV_CTRL_PATH_TX_STATS_TAG = 102,
	HTT_STATS_MU_PPDU_DIST_TAG = 129,

@@ -297,4 +305,77 @@ struct ath12k_htt_tx_pdev_mu_ppdu_dist_stats_tlv {
	__le32 num_ppdu_posted_per_burst[ATH12K_HTT_STATS_MU_PPDU_PER_BURST_WORDS];
} __packed;

#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_MAC_ID	GENMASK(7, 0)
#define ATH12K_HTT_TX_PDEV_STATS_SCHED_PER_TXQ_ID	GENMASK(15, 8)
#define ATH12K_HTT_TX_PDEV_NUM_SCHED_ORDER_LOG		20

struct ath12k_htt_stats_tx_sched_cmn_tlv {
	__le32 mac_id__word;
	__le32 current_timestamp;
} __packed;

struct ath12k_htt_tx_pdev_stats_sched_per_txq_tlv {
	__le32 mac_id__word;
	__le32 sched_policy;
	__le32 last_sched_cmd_posted_timestamp;
	__le32 last_sched_cmd_compl_timestamp;
	__le32 sched_2_tac_lwm_count;
	__le32 sched_2_tac_ring_full;
	__le32 sched_cmd_post_failure;
	__le32 num_active_tids;
	__le32 num_ps_schedules;
	__le32 sched_cmds_pending;
	__le32 num_tid_register;
	__le32 num_tid_unregister;
	__le32 num_qstats_queried;
	__le32 qstats_update_pending;
	__le32 last_qstats_query_timestamp;
	__le32 num_tqm_cmdq_full;
	__le32 num_de_sched_algo_trigger;
	__le32 num_rt_sched_algo_trigger;
	__le32 num_tqm_sched_algo_trigger;
	__le32 notify_sched;
	__le32 dur_based_sendn_term;
	__le32 su_notify2_sched;
	__le32 su_optimal_queued_msdus_sched;
	__le32 su_delay_timeout_sched;
	__le32 su_min_txtime_sched_delay;
	__le32 su_no_delay;
	__le32 num_supercycles;
	__le32 num_subcycles_with_sort;
	__le32 num_subcycles_no_sort;
} __packed;

struct ath12k_htt_sched_txq_cmd_posted_tlv {
	DECLARE_FLEX_ARRAY(__le32, sched_cmd_posted);
} __packed;

struct ath12k_htt_sched_txq_cmd_reaped_tlv {
	DECLARE_FLEX_ARRAY(__le32, sched_cmd_reaped);
} __packed;

struct ath12k_htt_sched_txq_sched_order_su_tlv {
	DECLARE_FLEX_ARRAY(__le32, sched_order_su);
} __packed;

struct ath12k_htt_sched_txq_sched_ineligibility_tlv {
	DECLARE_FLEX_ARRAY(__le32, sched_ineligibility);
} __packed;

enum ath12k_htt_sched_txq_supercycle_triggers_tlv_enum {
	ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_NONE = 0,
	ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_FORCED,
	ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_TIDQ_ENTRIES,
	ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_LESS_NUM_ACTIVE_TIDS,
	ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX_ITR_REACHED,
	ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_DUR_THRESHOLD_REACHED,
	ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_TWT_TRIGGER,
	ATH12K_HTT_SCHED_SUPERCYCLE_TRIGGER_MAX,
};

struct ath12k_htt_sched_txq_supercycle_triggers_tlv {
	DECLARE_FLEX_ARRAY(__le32, supercycle_triggers);
} __packed;

#endif