Commit a72e25f7 authored by John W. Linville

Merge branch 'for-linville' of git://github.com/kvalo/ath

parents b231070a 56b84287
......@@ -283,7 +283,7 @@ static int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
if (unlikely(CE_RING_DELTA(nentries_mask,
write_index, sw_index - 1) <= 0)) {
ret = -EIO;
ret = -ENOSR;
goto exit;
}
......@@ -338,38 +338,19 @@ int ath10k_ce_send(struct ath10k_ce_pipe *ce_state,
return ret;
}
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
unsigned int transfer_id,
u32 paddr, unsigned int nbytes,
u32 flags)
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ath10k *ar = ce_state->ar;
struct ath10k *ar = pipe->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index;
unsigned int write_index;
int delta, ret = -ENOMEM;
int delta;
spin_lock_bh(&ar_pci->ce_lock);
sw_index = src_ring->sw_index;
write_index = src_ring->write_index;
delta = CE_RING_DELTA(nentries_mask, write_index, sw_index - 1);
if (delta >= 1) {
ret = ath10k_ce_send_nolock(ce_state, per_transfer_context,
paddr, nbytes,
transfer_id, flags);
if (ret)
ath10k_warn("CE send failed: %d\n", ret);
}
delta = CE_RING_DELTA(pipe->src_ring->nentries_mask,
pipe->src_ring->write_index,
pipe->src_ring->sw_index - 1);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
return delta;
}
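For reference, a minimal userspace sketch of the ring arithmetic behind ath10k_ce_num_free_src_entries(), assuming CE_RING_DELTA() has the usual masked-difference semantics on a power-of-two ring (the helper name and index values below are illustrative, not taken from the driver):

#include <stdio.h>

/* Assumed semantics of CE_RING_DELTA(): number of slots from 'from' up to
 * (but not including) 'to' on a ring of nentries_mask + 1 entries. */
static unsigned int ring_delta(unsigned int nentries_mask,
                               unsigned int from, unsigned int to)
{
        return (to - from) & nentries_mask;
}

int main(void)
{
        unsigned int nentries_mask = 512 - 1;   /* 512-entry ring (power of two) */
        unsigned int write_index = 500;         /* producer (host) index */
        unsigned int sw_index = 10;             /* consumer (completion) index */

        /* Same expression as the driver: one slot stays unused so that
         * write_index == sw_index unambiguously means "ring empty". */
        unsigned int free_entries = ring_delta(nentries_mask, write_index,
                                               sw_index - 1);

        printf("free src entries: %u\n", free_entries);   /* prints 21 */
        return 0;
}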
int ath10k_ce_recv_buf_enqueue(struct ath10k_ce_pipe *ce_state,
......
......@@ -156,21 +156,7 @@ void ath10k_ce_send_cb_register(struct ath10k_ce_pipe *ce_state,
void (*send_cb)(struct ath10k_ce_pipe *),
int disable_interrupts);
/*
* Queue a "sendlist" of buffers to be sent using gather to a single
* anonymous destination buffer
* ce - which copy engine to use
* sendlist - list of simple buffers to send using gather
* transfer_id - arbitrary ID; reflected to destination
* Returns 0 on success; otherwise an error status.
*
* Implementation note: Pushes multiple buffers with Gather to Source ring.
*/
int ath10k_ce_sendlist_send(struct ath10k_ce_pipe *ce_state,
void *per_transfer_context,
unsigned int transfer_id,
u32 paddr, unsigned int nbytes,
u32 flags);
int ath10k_ce_num_free_src_entries(struct ath10k_ce_pipe *pipe);
/*==================Recv=======================*/
......
......@@ -59,27 +59,6 @@ static void ath10k_send_suspend_complete(struct ath10k *ar)
wake_up(&ar->event_queue);
}
static int ath10k_check_fw_version(struct ath10k *ar)
{
char version[32];
if (ar->fw_version_major >= SUPPORTED_FW_MAJOR &&
ar->fw_version_minor >= SUPPORTED_FW_MINOR &&
ar->fw_version_release >= SUPPORTED_FW_RELEASE &&
ar->fw_version_build >= SUPPORTED_FW_BUILD)
return 0;
snprintf(version, sizeof(version), "%u.%u.%u.%u",
SUPPORTED_FW_MAJOR, SUPPORTED_FW_MINOR,
SUPPORTED_FW_RELEASE, SUPPORTED_FW_BUILD);
ath10k_warn("WARNING: Firmware version %s is not officially supported.\n",
ar->hw->wiphy->fw_version);
ath10k_warn("Please upgrade to version %s (or newer)\n", version);
return 0;
}
static int ath10k_init_connect_htc(struct ath10k *ar)
{
int status;
......@@ -189,8 +168,7 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
return fw;
}
static int ath10k_push_board_ext_data(struct ath10k *ar,
const struct firmware *fw)
static int ath10k_push_board_ext_data(struct ath10k *ar)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
......@@ -210,14 +188,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
if (board_ext_data_addr == 0)
return 0;
if (fw->size != (board_data_size + board_ext_data_size)) {
if (ar->board_len != (board_data_size + board_ext_data_size)) {
ath10k_err("invalid board (ext) data sizes %zu != %d+%d\n",
fw->size, board_data_size, board_ext_data_size);
ar->board_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
fw->data + board_data_size,
ar->board_data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err("could not write board ext data (%d)\n", ret);
......@@ -236,12 +214,11 @@ static int ath10k_push_board_ext_data(struct ath10k *ar,
static int ath10k_download_board_data(struct ath10k *ar)
{
const struct firmware *fw = ar->board_data;
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
int ret;
ret = ath10k_push_board_ext_data(ar, fw);
ret = ath10k_push_board_ext_data(ar);
if (ret) {
ath10k_err("could not push board ext data (%d)\n", ret);
goto exit;
......@@ -253,8 +230,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
goto exit;
}
ret = ath10k_bmi_write_memory(ar, address, fw->data,
min_t(u32, board_data_size, fw->size));
ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
min_t(u32, board_data_size,
ar->board_len));
if (ret) {
ath10k_err("could not write board data (%d)\n", ret);
goto exit;
......@@ -272,17 +250,16 @@ static int ath10k_download_board_data(struct ath10k *ar)
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
const struct firmware *fw = ar->otp;
u32 address = ar->hw_params.patch_load_addr;
u32 exec_param;
int ret;
/* OTP is optional */
if (!ar->otp)
if (!ar->otp_data || !ar->otp_len)
return 0;
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
ret = ath10k_bmi_fast_download(ar, address, ar->otp_data, ar->otp_len);
if (ret) {
ath10k_err("could not write otp (%d)\n", ret);
goto exit;
......@@ -301,13 +278,13 @@ static int ath10k_download_and_run_otp(struct ath10k *ar)
static int ath10k_download_fw(struct ath10k *ar)
{
const struct firmware *fw = ar->firmware;
u32 address;
int ret;
address = ar->hw_params.patch_load_addr;
ret = ath10k_bmi_fast_download(ar, address, fw->data, fw->size);
ret = ath10k_bmi_fast_download(ar, address, ar->firmware_data,
ar->firmware_len);
if (ret) {
ath10k_err("could not write fw (%d)\n", ret);
goto exit;
......@@ -319,8 +296,8 @@ static int ath10k_download_fw(struct ath10k *ar)
static void ath10k_core_free_firmware_files(struct ath10k *ar)
{
if (ar->board_data && !IS_ERR(ar->board_data))
release_firmware(ar->board_data);
if (ar->board && !IS_ERR(ar->board))
release_firmware(ar->board);
if (ar->otp && !IS_ERR(ar->otp))
release_firmware(ar->otp);
......@@ -328,12 +305,20 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (ar->firmware && !IS_ERR(ar->firmware))
release_firmware(ar->firmware);
ar->board = NULL;
ar->board_data = NULL;
ar->board_len = 0;
ar->otp = NULL;
ar->otp_data = NULL;
ar->otp_len = 0;
ar->firmware = NULL;
ar->firmware_data = NULL;
ar->firmware_len = 0;
}
static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
{
int ret = 0;
......@@ -347,15 +332,18 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
return -EINVAL;
}
ar->board_data = ath10k_fetch_fw_file(ar,
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
if (IS_ERR(ar->board_data)) {
ret = PTR_ERR(ar->board_data);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data (%d)\n", ret);
goto err;
}
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
ar->firmware = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.fw);
......@@ -365,6 +353,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
goto err;
}
ar->firmware_data = ar->firmware->data;
ar->firmware_len = ar->firmware->size;
/* OTP may be undefined. If so, don't fetch it at all */
if (ar->hw_params.fw.otp == NULL)
return 0;
......@@ -378,6 +369,172 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
goto err;
}
ar->otp_data = ar->otp->data;
ar->otp_len = ar->otp->size;
return 0;
err:
ath10k_core_free_firmware_files(ar);
return ret;
}
static int ath10k_core_fetch_firmware_api_n(struct ath10k *ar, const char *name)
{
size_t magic_len, len, ie_len;
int ie_id, i, index, bit, ret;
struct ath10k_fw_ie *hdr;
const u8 *data;
__le32 *timestamp;
/* first fetch the firmware file (firmware-*.bin) */
ar->firmware = ath10k_fetch_fw_file(ar, ar->hw_params.fw.dir, name);
if (IS_ERR(ar->firmware)) {
ath10k_err("Could not fetch firmware file '%s': %ld\n",
name, PTR_ERR(ar->firmware));
return PTR_ERR(ar->firmware);
}
data = ar->firmware->data;
len = ar->firmware->size;
/* magic also includes the null byte, check that as well */
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
if (len < magic_len) {
ath10k_err("firmware image too small to contain magic: %zu\n",
len);
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
ath10k_err("Invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
/* jump over the padding */
magic_len = ALIGN(magic_len, 4);
len -= magic_len;
data += magic_len;
/* loop elements */
while (len > sizeof(struct ath10k_fw_ie)) {
hdr = (struct ath10k_fw_ie *)data;
ie_id = le32_to_cpu(hdr->id);
ie_len = le32_to_cpu(hdr->len);
len -= sizeof(*hdr);
data += sizeof(*hdr);
if (len < ie_len) {
ath10k_err("Invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
}
switch (ie_id) {
case ATH10K_FW_IE_FW_VERSION:
if (ie_len > sizeof(ar->hw->wiphy->fw_version) - 1)
break;
memcpy(ar->hw->wiphy->fw_version, data, ie_len);
ar->hw->wiphy->fw_version[ie_len] = '\0';
ath10k_dbg(ATH10K_DBG_BOOT,
"found fw version %s\n",
ar->hw->wiphy->fw_version);
break;
case ATH10K_FW_IE_TIMESTAMP:
if (ie_len != sizeof(u32))
break;
timestamp = (__le32 *)data;
ath10k_dbg(ATH10K_DBG_BOOT, "found fw timestamp %d\n",
le32_to_cpup(timestamp));
break;
case ATH10K_FW_IE_FEATURES:
ath10k_dbg(ATH10K_DBG_BOOT,
"found firmware features ie (%zd B)\n",
ie_len);
for (i = 0; i < ATH10K_FW_FEATURE_COUNT; i++) {
index = i / 8;
bit = i % 8;
if (index == ie_len)
break;
if (data[index] & (1 << bit))
__set_bit(i, ar->fw_features);
}
ath10k_dbg_dump(ATH10K_DBG_BOOT, "features", "",
ar->fw_features,
sizeof(ar->fw_features));
break;
case ATH10K_FW_IE_FW_IMAGE:
ath10k_dbg(ATH10K_DBG_BOOT,
"found fw image ie (%zd B)\n",
ie_len);
ar->firmware_data = data;
ar->firmware_len = ie_len;
break;
case ATH10K_FW_IE_OTP_IMAGE:
ath10k_dbg(ATH10K_DBG_BOOT,
"found otp image ie (%zd B)\n",
ie_len);
ar->otp_data = data;
ar->otp_len = ie_len;
break;
default:
ath10k_warn("Unknown FW IE: %u\n",
le32_to_cpu(hdr->id));
break;
}
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
len -= ie_len;
data += ie_len;
}
if (!ar->firmware_data || !ar->firmware_len) {
ath10k_warn("No ATH10K_FW_IE_FW_IMAGE found from %s, skipping\n",
name);
ret = -ENOMEDIUM;
goto err;
}
/* now fetch the board file */
if (ar->hw_params.fw.board == NULL) {
ath10k_err("board data file not defined");
ret = -EINVAL;
goto err;
}
ar->board = ath10k_fetch_fw_file(ar,
ar->hw_params.fw.dir,
ar->hw_params.fw.board);
if (IS_ERR(ar->board)) {
ret = PTR_ERR(ar->board);
ath10k_err("could not fetch board data (%d)\n", ret);
goto err;
}
ar->board_data = ar->board->data;
ar->board_len = ar->board->size;
return 0;
err:
......@@ -385,6 +542,28 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
return ret;
}
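The firmware-2.bin image parsed above is a plain TLV container: the NUL-terminated magic string padded to 4 bytes, then (id, len, data) elements, each padded to 4 bytes. A standalone sketch of that walk, assuming a little-endian host and userspace types (the bounds checks are slightly stricter than the driver's loop):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FW_MAGIC "QCA-ATH10K"
#define ALIGN4(x) (((x) + 3u) & ~3u)

struct fw_ie {          /* mirrors struct ath10k_fw_ie */
        uint32_t id;    /* little-endian on disk */
        uint32_t len;
        uint8_t data[];
};

static int parse_fw_image(const uint8_t *data, size_t len)
{
        size_t magic_len = strlen(FW_MAGIC) + 1;   /* magic includes the NUL */

        if (len < magic_len || memcmp(data, FW_MAGIC, magic_len) != 0)
                return -1;

        magic_len = ALIGN4(magic_len);             /* jump over the padding */
        data += magic_len;
        len -= magic_len;

        while (len >= sizeof(struct fw_ie)) {
                const struct fw_ie *hdr = (const void *)data;
                uint32_t ie_id = hdr->id;          /* le32_to_cpu() in the driver */
                uint32_t ie_len = hdr->len;

                data += sizeof(*hdr);
                len -= sizeof(*hdr);
                if (ie_len > len)
                        return -1;

                printf("IE %u, %u bytes\n", ie_id, ie_len);

                ie_len = ALIGN4(ie_len);           /* skip the padding as well */
                if (ie_len > len)
                        break;                     /* last IE, padding may be absent */
                data += ie_len;
                len -= ie_len;
        }

        return 0;
}

The driver performs the same walk but, instead of printing, records pointers into the buffer for the FW image, OTP image, feature bitmap and version string.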
static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
ret = ath10k_core_fetch_firmware_api_n(ar, ATH10K_FW_API2_FILE);
if (ret == 0) {
ar->fw_api = 2;
goto out;
}
ret = ath10k_core_fetch_firmware_api_1(ar);
if (ret)
return ret;
ar->fw_api = 1;
out:
ath10k_dbg(ATH10K_DBG_BOOT, "using fw api %d\n", ar->fw_api);
return 0;
}
static int ath10k_init_download_firmware(struct ath10k *ar)
{
int ret;
......@@ -541,6 +720,9 @@ struct ath10k *ath10k_core_create(void *hif_priv, struct device *dev,
INIT_WORK(&ar->offchan_tx_work, ath10k_offchan_tx_work);
skb_queue_head_init(&ar->offchan_tx_queue);
INIT_WORK(&ar->wmi_mgmt_tx_work, ath10k_mgmt_over_wmi_tx_work);
skb_queue_head_init(&ar->wmi_mgmt_tx_queue);
init_waitqueue_head(&ar->event_queue);
INIT_WORK(&ar->restart_work, ath10k_core_restart);
......@@ -555,6 +737,8 @@ EXPORT_SYMBOL(ath10k_core_create);
void ath10k_core_destroy(struct ath10k *ar)
{
ath10k_debug_destroy(ar);
flush_workqueue(ar->workqueue);
destroy_workqueue(ar->workqueue);
......@@ -566,6 +750,8 @@ int ath10k_core_start(struct ath10k *ar)
{
int status;
lockdep_assert_held(&ar->conf_mutex);
ath10k_bmi_start(ar);
if (ath10k_init_configure_target(ar)) {
......@@ -616,10 +802,6 @@ int ath10k_core_start(struct ath10k *ar)
ath10k_info("firmware %s booted\n", ar->hw->wiphy->fw_version);
status = ath10k_check_fw_version(ar);
if (status)
goto err_disconnect_htc;
status = ath10k_wmi_cmd_init(ar);
if (status) {
ath10k_err("could not send WMI init command (%d)\n", status);
......@@ -642,6 +824,7 @@ int ath10k_core_start(struct ath10k *ar)
goto err_disconnect_htc;
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
return 0;
......@@ -658,6 +841,8 @@ EXPORT_SYMBOL(ath10k_core_start);
void ath10k_core_stop(struct ath10k *ar)
{
lockdep_assert_held(&ar->conf_mutex);
ath10k_debug_stop(ar);
ath10k_htc_stop(&ar->htc);
ath10k_htt_detach(&ar->htt);
......@@ -705,15 +890,21 @@ static int ath10k_core_probe_fw(struct ath10k *ar)
return ret;
}
mutex_lock(&ar->conf_mutex);
ret = ath10k_core_start(ar);
if (ret) {
ath10k_err("could not init core (%d)\n", ret);
ath10k_core_free_firmware_files(ar);
ath10k_hif_power_down(ar);
mutex_unlock(&ar->conf_mutex);
return ret;
}
ath10k_core_stop(ar);
mutex_unlock(&ar->conf_mutex);
ath10k_hif_power_down(ar);
return 0;
}
......
......@@ -43,15 +43,17 @@
/* Antenna noise floor */
#define ATH10K_DEFAULT_NOISE_FLOOR -95
#define ATH10K_MAX_NUM_MGMT_PENDING 16
struct ath10k;
struct ath10k_skb_cb {
dma_addr_t paddr;
bool is_mapped;
bool is_aborted;
u8 vdev_id;
struct {
u8 vdev_id;
u8 tid;
bool is_offchan;
......@@ -102,11 +104,26 @@ struct ath10k_bmi {
bool done_sent;
};
#define ATH10K_MAX_MEM_REQS 16
struct ath10k_mem_chunk {
void *vaddr;
dma_addr_t paddr;
u32 len;
u32 req_id;
};
struct ath10k_wmi {
enum ath10k_htc_ep_id eid;
struct completion service_ready;
struct completion unified_ready;
wait_queue_head_t tx_credits_wq;
struct wmi_cmd_map *cmd;
struct wmi_vdev_param_map *vdev_param;
struct wmi_pdev_param_map *pdev_param;
u32 num_mem_chunks;
struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
};
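The mem_chunks[] array tracks host memory that 10.X firmware requests at WMI service-ready time. A hedged sketch of how one entry could be filled in; the helper name is illustrative and the real allocation path lives in the WMI service-ready handling, but dma_alloc_coherent() is the usual way to get a host buffer together with its DMA address:

/* Illustrative only: allocate one host memory chunk the firmware asked for.
 * Assumes ar->dev is the struct device used for DMA mappings. */
static int ath10k_alloc_mem_chunk_example(struct ath10k *ar, u32 req_id, u32 len)
{
        struct ath10k_mem_chunk *chunk;

        if (ar->wmi.num_mem_chunks >= ATH10K_MAX_MEM_REQS)
                return -ENOMEM;

        chunk = &ar->wmi.mem_chunks[ar->wmi.num_mem_chunks];
        chunk->vaddr = dma_alloc_coherent(ar->dev, len, &chunk->paddr,
                                          GFP_KERNEL);
        if (!chunk->vaddr)
                return -ENOMEM;

        chunk->len = len;
        chunk->req_id = req_id;
        ar->wmi.num_mem_chunks++;

        return 0;
}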
struct ath10k_peer_stat {
......@@ -188,6 +205,8 @@ struct ath10k_peer {
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
struct ath10k_vif {
struct list_head list;
u32 vdev_id;
enum wmi_vdev_type vdev_type;
enum wmi_vdev_subtype vdev_subtype;
......@@ -198,8 +217,10 @@ struct ath10k_vif {
struct ath10k *ar;
struct ieee80211_vif *vif;
struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
u8 def_wep_key_index;
u8 def_wep_key_idx;
u8 def_wep_key_newidx;
u16 tx_seq_no;
......@@ -268,6 +289,12 @@ enum ath10k_fw_features {
/* wmi_mgmt_rx_hdr contains extra RSSI information */
ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX = 0,
/* firmware from 10X branch */
ATH10K_FW_FEATURE_WMI_10X = 1,
/* firmware support tx frame management over WMI, otherwise it's HTT */
ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX = 2,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
......@@ -324,9 +351,19 @@ struct ath10k {
} fw;
} hw_params;
const struct firmware *board_data;
const struct firmware *board;
const void *board_data;
size_t board_len;
const struct firmware *otp;
const void *otp_data;
size_t otp_len;
const struct firmware *firmware;
const void *firmware_data;
size_t firmware_len;
int fw_api;
struct {
struct completion started;
......@@ -369,6 +406,7 @@ struct ath10k {
/* protects shared structure data */
spinlock_t data_lock;
struct list_head arvifs;
struct list_head peers;
wait_queue_head_t peer_mapping_wq;
......@@ -377,6 +415,9 @@ struct ath10k {
struct completion offchan_tx_completed;
struct sk_buff *offchan_tx_skb;
struct work_struct wmi_mgmt_tx_work;
struct sk_buff_head wmi_mgmt_tx_queue;
enum ath10k_state state;
struct work_struct restart_work;
......
......@@ -618,6 +618,8 @@ int ath10k_debug_start(struct ath10k *ar)
{
int ret;
lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_debug_htt_stats_req(ar);
if (ret)
/* continue normally anyway, this isn't serious */
......@@ -628,7 +630,13 @@ int ath10k_debug_start(struct ath10k *ar)
void ath10k_debug_stop(struct ath10k *ar)
{
cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
lockdep_assert_held(&ar->conf_mutex);
/* Must not use _sync to avoid deadlock, we do that in
* ath10k_debug_destroy(). The check for htt_stats_mask is to avoid
* warning from del_timer(). */
if (ar->debug.htt_stats_mask != 0)
cancel_delayed_work(&ar->debug.htt_stats_dwork);
}
int ath10k_debug_create(struct ath10k *ar)
......@@ -662,6 +670,11 @@ int ath10k_debug_create(struct ath10k *ar)
return 0;
}
void ath10k_debug_destroy(struct ath10k *ar)
{
cancel_delayed_work_sync(&ar->debug.htt_stats_dwork);
}
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
......
......@@ -46,6 +46,7 @@ extern __printf(1, 2) int ath10k_warn(const char *fmt, ...);
int ath10k_debug_start(struct ath10k *ar);
void ath10k_debug_stop(struct ath10k *ar);
int ath10k_debug_create(struct ath10k *ar);
void ath10k_debug_destroy(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size);
......@@ -67,6 +68,10 @@ static inline int ath10k_debug_create(struct ath10k *ar)
return 0;
}
static inline void ath10k_debug_destroy(struct ath10k *ar)
{
}
static inline void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
size_t map_size)
......
......@@ -308,7 +308,7 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct sk_buff *txdesc = NULL;
struct htt_cmd *cmd;
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
u8 vdev_id = skb_cb->htt.vdev_id;
u8 vdev_id = skb_cb->vdev_id;
int len = 0;
int msdu_id = -1;
int res;
......@@ -384,7 +384,7 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
struct sk_buff *txdesc = NULL;
bool use_frags;
u8 vdev_id = ATH10K_SKB_CB(msdu)->htt.vdev_id;
u8 vdev_id = ATH10K_SKB_CB(msdu)->vdev_id;
u8 tid;
int prefetch_len, desc_len;
int msdu_id = -1;
......
......@@ -20,12 +20,6 @@
#include "targaddrs.h"
/* Supported FW version */
#define SUPPORTED_FW_MAJOR 1
#define SUPPORTED_FW_MINOR 0
#define SUPPORTED_FW_RELEASE 0
#define SUPPORTED_FW_BUILD 636
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
......@@ -38,6 +32,25 @@
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_API2_FILE "firmware-2.bin"
/* includes also the null byte */
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
struct ath10k_fw_ie {
__le32 id;
__le32 len;
u8 data[0];
};
enum ath10k_fw_ie_type {
ATH10K_FW_IE_FW_VERSION = 0,
ATH10K_FW_IE_TIMESTAMP = 1,
ATH10K_FW_IE_FEATURES = 2,
ATH10K_FW_IE_FW_IMAGE = 3,
ATH10K_FW_IE_OTP_IMAGE = 4,
};
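For illustration, the first bytes of a hypothetical firmware-2.bin built from these definitions would look as follows (values are made up; only the layout matters):

/* "QCA-ATH10K\0" padded to a 4-byte boundary, followed by one IE. */
static const unsigned char fw_api2_header_example[] = {
        'Q', 'C', 'A', '-', 'A', 'T', 'H', '1', '0', 'K', 0x00, 0x00,
        /* struct ath10k_fw_ie: id = ATH10K_FW_IE_FW_VERSION (0), len = 5 */
        0x00, 0x00, 0x00, 0x00,         /* __le32 id  */
        0x05, 0x00, 0x00, 0x00,         /* __le32 len */
        '1', '.', '0', '.', '0',        /* data[]: "1.0.0" (no NUL required) */
        0x00, 0x00, 0x00,               /* padding to the next 4-byte boundary */
};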
/* Known peculiarities:
* - current FW doesn't support raw rx mode (last tested v599)
* - current FW dumps upon raw tx mode (last tested v599)
......@@ -59,6 +72,7 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
#define TARGET_NUM_WDS_ENTRIES 32
......@@ -93,6 +107,36 @@ enum ath10k_mcast2ucast_mode {
#define TARGET_NUM_MSDU_DESC (1024 + 400)
#define TARGET_MAX_FRAG_ENTRIES 0
/* Target specific defines for 10.X firmware */
#define TARGET_10X_NUM_VDEVS 16
#define TARGET_10X_NUM_PEER_AST 2
#define TARGET_10X_NUM_WDS_ENTRIES 32
#define TARGET_10X_DMA_BURST_SIZE 0
#define TARGET_10X_MAC_AGGR_DELIM 0
#define TARGET_10X_AST_SKID_LIMIT 16
#define TARGET_10X_NUM_PEERS (128 + (TARGET_10X_NUM_VDEVS))
#define TARGET_10X_NUM_OFFLOAD_PEERS 0
#define TARGET_10X_NUM_OFFLOAD_REORDER_BUFS 0
#define TARGET_10X_NUM_PEER_KEYS 2
#define TARGET_10X_NUM_TIDS 256
#define TARGET_10X_TX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_CHAIN_MASK (BIT(0) | BIT(1) | BIT(2))
#define TARGET_10X_RX_TIMEOUT_LO_PRI 100
#define TARGET_10X_RX_TIMEOUT_HI_PRI 40
#define TARGET_10X_RX_DECAP_MODE ATH10K_HW_TXRX_NATIVE_WIFI
#define TARGET_10X_SCAN_MAX_PENDING_REQS 4
#define TARGET_10X_BMISS_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_VDEV 2
#define TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES 8
#define TARGET_10X_GTK_OFFLOAD_MAX_VDEV 3
#define TARGET_10X_NUM_MCAST_GROUPS 0
#define TARGET_10X_NUM_MCAST_TABLE_ELEMS 0
#define TARGET_10X_MCAST2UCAST_MODE ATH10K_MCAST2UCAST_DISABLED
#define TARGET_10X_TX_DBG_LOG_SIZE 1024
#define TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK 1
#define TARGET_10X_VOW_CONFIG 0
#define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10X_MAX_FRAG_ENTRIES 0
/* Number of Copy Engines supported */
#define CE_COUNT 8
......
......@@ -334,25 +334,29 @@ static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr)
static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
if (value != 0xFFFFFFFF)
value = min_t(u32, arvif->ar->hw->wiphy->rts_threshold,
ATH10K_RTS_MAX);
return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
WMI_VDEV_PARAM_RTS_THRESHOLD,
value);
vdev_param = ar->wmi.vdev_param->rts_threshold;
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value)
{
struct ath10k *ar = arvif->ar;
u32 vdev_param;
if (value != 0xFFFFFFFF)
value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold,
ATH10K_FRAGMT_THRESHOLD_MIN,
ATH10K_FRAGMT_THRESHOLD_MAX);
return ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
value);
vdev_param = ar->wmi.vdev_param->fragmentation_threshold;
return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}
static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
......@@ -562,12 +566,9 @@ static int ath10k_monitor_stop(struct ath10k *ar)
lockdep_assert_held(&ar->conf_mutex);
/* For some reason, calling ath10k_wmi_vdev_down() here often causes
* the subsequent ath10k_wmi_vdev_stop() to fail, after which the
* monitor vdev cannot be run again without reloading the driver.
* Since we don't see such problems without it, skip
* ath10k_wmi_vdev_down() here.
*/
ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
if (ret)
ath10k_warn("Monitor vdev down failed: %d\n", ret);
ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
if (ret)
......@@ -677,6 +678,7 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
struct ieee80211_bss_conf *info,
const u8 self_peer[ETH_ALEN])
{
u32 vdev_param;
int ret = 0;
lockdep_assert_held(&arvif->ar->conf_mutex);
......@@ -710,8 +712,8 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
return;
}
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id,
WMI_VDEV_PARAM_ATIM_WINDOW,
vdev_param = arvif->ar->wmi.vdev_param->atim_window;
ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
ATH10K_DEFAULT_ATIM);
if (ret)
ath10k_warn("Failed to set IBSS ATIM for VDEV:%d ret:%d\n",
......@@ -721,35 +723,30 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
/*
* Review this when mac80211 gains per-interface powersave support.
*/
static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
struct ath10k_generic_iter *ar_iter = data;
struct ieee80211_conf *conf = &ar_iter->ar->hw->conf;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct ath10k *ar = arvif->ar;
struct ieee80211_conf *conf = &ar->hw->conf;
enum wmi_sta_powersave_param param;
enum wmi_sta_ps_mode psmode;
int ret;
lockdep_assert_held(&arvif->ar->conf_mutex);
if (vif->type != NL80211_IFTYPE_STATION)
return;
if (arvif->vif->type != NL80211_IFTYPE_STATION)
return 0;
if (conf->flags & IEEE80211_CONF_PS) {
psmode = WMI_STA_PS_MODE_ENABLED;
param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
ret = ath10k_wmi_set_sta_ps_param(ar_iter->ar,
arvif->vdev_id,
param,
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
conf->dynamic_ps_timeout);
if (ret) {
ath10k_warn("Failed to set inactivity time for VDEV: %d\n",
arvif->vdev_id);
return;
return ret;
}
ar_iter->ret = ret;
} else {
psmode = WMI_STA_PS_MODE_DISABLED;
}
......@@ -757,11 +754,14 @@ static void ath10k_ps_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
arvif->vdev_id, psmode ? "enable" : "disable");
ar_iter->ret = ath10k_wmi_set_psmode(ar_iter->ar, arvif->vdev_id,
psmode);
if (ar_iter->ret)
ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
if (ret) {
ath10k_warn("Failed to set PS Mode: %d for VDEV: %d\n",
psmode, arvif->vdev_id);
return ret;
}
return 0;
}
/**********************/
......@@ -1031,14 +1031,27 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
struct wmi_peer_assoc_complete_arg *arg)
{
const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
u8 ampdu_factor;
if (!vht_cap->vht_supported)
return;
arg->peer_flags |= WMI_PEER_VHT;
arg->peer_vht_caps = vht_cap->cap;
ampdu_factor = (vht_cap->cap &
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
* zero in VHT IE. Using it would result in degraded throughput.
* arg->peer_max_mpdu at this point contains HT max_mpdu so keep
* it if VHT max_mpdu is smaller. */
arg->peer_max_mpdu = max(arg->peer_max_mpdu,
(1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
ampdu_factor)) - 1);
if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
arg->peer_flags |= WMI_PEER_80MHZ;
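To make the workaround concrete: with IEEE80211_HT_MAX_AMPDU_FACTOR being 13, an AP advertising a VHT exponent of 0 (the buggy case) would yield 2^13 - 1 = 8191, so the max() keeps the larger HT-derived limit; a sane exponent of 7 would yield 2^20 - 1 and raise it. A small standalone check with illustrative values:

#include <stdio.h>

int main(void)
{
        unsigned int ht_max_ampdu_factor = 13;  /* IEEE80211_HT_MAX_AMPDU_FACTOR */
        unsigned int peer_max_mpdu = 65535;     /* from HT caps: 2^(13+3) - 1 */
        unsigned int vht_ampdu_factor = 0;      /* buggy AP: exponent 0 in VHT IE */

        unsigned int vht_limit =
                (1U << (ht_max_ampdu_factor + vht_ampdu_factor)) - 1;   /* 8191 */

        if (vht_limit > peer_max_mpdu)
                peer_max_mpdu = vht_limit;

        printf("peer_max_mpdu = %u\n", peer_max_mpdu);  /* stays 65535 */
        return 0;
}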
......@@ -1124,26 +1137,25 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
WARN_ON(phymode == MODE_UNKNOWN);
}
static int ath10k_peer_assoc(struct ath10k *ar,
static int ath10k_peer_assoc_prepare(struct ath10k *ar,
struct ath10k_vif *arvif,
struct ieee80211_sta *sta,
struct ieee80211_bss_conf *bss_conf)
struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
struct wmi_peer_assoc_complete_arg arg;
lockdep_assert_held(&ar->conf_mutex);
memset(&arg, 0, sizeof(struct wmi_peer_assoc_complete_arg));
memset(arg, 0, sizeof(*arg));
ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, &arg);
ath10k_peer_assoc_h_crypto(ar, arvif, &arg);
ath10k_peer_assoc_h_rates(ar, sta, &arg);
ath10k_peer_assoc_h_ht(ar, sta, &arg);
ath10k_peer_assoc_h_vht(ar, sta, &arg);
ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, &arg);
ath10k_peer_assoc_h_phymode(ar, arvif, sta, &arg);
ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
ath10k_peer_assoc_h_crypto(ar, arvif, arg);
ath10k_peer_assoc_h_rates(ar, sta, arg);
ath10k_peer_assoc_h_ht(ar, sta, arg);
ath10k_peer_assoc_h_vht(ar, sta, arg);
ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
return ath10k_wmi_peer_assoc(ar, &arg);
return 0;
}
/* can be called only in mac80211 callbacks due to `key_count` usage */
......@@ -1153,6 +1165,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
{
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_peer_assoc_complete_arg peer_arg;
struct ieee80211_sta *ap_sta;
int ret;
......@@ -1168,15 +1181,24 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
return;
}
ret = ath10k_peer_assoc(ar, arvif, ap_sta, bss_conf);
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
if (ret) {
ath10k_warn("Peer assoc failed for %pM\n", bss_conf->bssid);
ath10k_warn("Peer assoc prepare failed for %pM\n: %d",
bss_conf->bssid, ret);
rcu_read_unlock();
return;
}
rcu_read_unlock();
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
ath10k_warn("Peer assoc failed for %pM\n: %d",
bss_conf->bssid, ret);
return;
}
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
......@@ -1224,19 +1246,28 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
/* FIXME: why don't we print error if wmi call fails? */
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
arvif->def_wep_key_index = 0;
arvif->def_wep_key_idx = 0;
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
struct ieee80211_sta *sta)
{
struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_peer_assoc(ar, arvif, sta, NULL);
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
if (ret) {
ath10k_warn("WMI peer assoc failed for %pM\n", sta->addr);
ath10k_warn("WMI peer assoc prepare failed for %pM\n",
sta->addr);
return ret;
}
ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
if (ret) {
ath10k_warn("Peer assoc failed for STA %pM\n: %d",
sta->addr, ret);
return ret;
}
......@@ -1405,6 +1436,33 @@ static void ath10k_reg_notifier(struct wiphy *wiphy,
/* TX handlers */
/***************/
static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr)
{
if (ieee80211_is_mgmt(hdr->frame_control))
return HTT_DATA_TX_EXT_TID_MGMT;
if (!ieee80211_is_data_qos(hdr->frame_control))
return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
if (!is_unicast_ether_addr(ieee80211_get_DA(hdr)))
return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK;
}
static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar,
struct ieee80211_tx_info *info)
{
if (info->control.vif)
return ath10k_vif_to_arvif(info->control.vif)->vdev_id;
if (ar->monitor_enabled)
return ar->monitor_vdev_id;
ath10k_warn("could not resolve vdev id\n");
return 0;
}
/*
* Frames sent to the FW have to be in "Native Wifi" format.
* Strip the QoS field from the 802.11 header.
......@@ -1425,6 +1483,30 @@ static void ath10k_tx_h_qos_workaround(struct ieee80211_hw *hw,
skb_pull(skb, IEEE80211_QOS_CTL_LEN);
}
static void ath10k_tx_wep_key_work(struct work_struct *work)
{
struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
wep_key_work);
int ret, keyidx = arvif->def_wep_key_newidx;
if (arvif->def_wep_key_idx == keyidx)
return;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
arvif->vdev_id, keyidx);
ret = ath10k_wmi_vdev_set_param(arvif->ar,
arvif->vdev_id,
arvif->ar->wmi.vdev_param->def_keyid,
keyidx);
if (ret) {
ath10k_warn("could not update wep keyidx (%d)\n", ret);
return;
}
arvif->def_wep_key_idx = keyidx;
}
static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
{
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
......@@ -1433,7 +1515,6 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
struct ath10k *ar = arvif->ar;
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ieee80211_key_conf *key = info->control.hw_key;
int ret;
if (!ieee80211_has_protected(hdr->frame_control))
return;
......@@ -1445,21 +1526,14 @@ static void ath10k_tx_h_update_wep_key(struct sk_buff *skb)
key->cipher != WLAN_CIPHER_SUITE_WEP104)
return;
if (key->keyidx == arvif->def_wep_key_index)
return;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d keyidx %d\n",
arvif->vdev_id, key->keyidx);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DEF_KEYID,
key->keyidx);
if (ret) {
ath10k_warn("could not update wep keyidx (%d)\n", ret);
if (key->keyidx == arvif->def_wep_key_idx)
return;
}
arvif->def_wep_key_index = key->keyidx;
/* FIXME: Most likely a few frames will be TXed with an old key. Simply
* queueing frames until key index is updated is not an option because
* sk_buff may need more processing to be done, e.g. offchannel */
arvif->def_wep_key_newidx = key->keyidx;
ieee80211_queue_work(ar->hw, &arvif->wep_key_work);
}
static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
......@@ -1489,7 +1563,7 @@ static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
{
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
int ret;
int ret = 0;
if (ar->htt.target_version_major >= 3) {
/* Since HTT 3.0 there is no separate mgmt tx command */
......@@ -1497,16 +1571,32 @@ static void ath10k_tx_htt(struct ath10k *ar, struct sk_buff *skb)
goto exit;
}
if (ieee80211_is_mgmt(hdr->frame_control))
if (ieee80211_is_mgmt(hdr->frame_control)) {
if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
ar->fw_features)) {
if (skb_queue_len(&ar->wmi_mgmt_tx_queue) >=
ATH10K_MAX_NUM_MGMT_PENDING) {
ath10k_warn("wmi mgmt_tx queue limit reached\n");
ret = -EBUSY;
goto exit;
}
skb_queue_tail(&ar->wmi_mgmt_tx_queue, skb);
ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
} else {
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
else if (ieee80211_is_nullfunc(hdr->frame_control))
}
} else if (!test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
ar->fw_features) &&
ieee80211_is_nullfunc(hdr->frame_control)) {
/* FW does not report tx status properly for NullFunc frames
* unless they are sent through mgmt tx path. mac80211 sends
* those frames when it detects link/beacon loss and depends on
* the tx status to be correct. */
* those frames when it detects link/beacon loss and depends
* on the tx status to be correct. */
ret = ath10k_htt_mgmt_tx(&ar->htt, skb);
else
} else {
ret = ath10k_htt_tx(&ar->htt, skb);
}
exit:
if (ret) {
......@@ -1557,7 +1647,7 @@ void ath10k_offchan_tx_work(struct work_struct *work)
hdr = (struct ieee80211_hdr *)skb->data;
peer_addr = ieee80211_get_DA(hdr);
vdev_id = ATH10K_SKB_CB(skb)->htt.vdev_id;
vdev_id = ATH10K_SKB_CB(skb)->vdev_id;
spin_lock_bh(&ar->data_lock);
peer = ath10k_peer_find(ar, vdev_id, peer_addr);
......@@ -1599,6 +1689,36 @@ void ath10k_offchan_tx_work(struct work_struct *work)
}
}
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
{
struct sk_buff *skb;
for (;;) {
skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
if (!skb)
break;
ieee80211_free_txskb(ar->hw, skb);
}
}
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
{
struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
struct sk_buff *skb;
int ret;
for (;;) {
skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
if (!skb)
break;
ret = ath10k_wmi_mgmt_tx(ar, skb);
if (ret)
ath10k_warn("wmi mgmt_tx failed (%d)\n", ret);
}
}
/************/
/* Scanning */
/************/
......@@ -1722,16 +1842,7 @@ static void ath10k_tx(struct ieee80211_hw *hw,
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = NULL;
u32 vdev_id = 0;
u8 tid;
if (info->control.vif) {
arvif = ath10k_vif_to_arvif(info->control.vif);
vdev_id = arvif->vdev_id;
} else if (ar->monitor_enabled) {
vdev_id = ar->monitor_vdev_id;
}
u8 tid, vdev_id;
/* We should disable CCK RATE due to P2P */
if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
......@@ -1739,14 +1850,8 @@ static void ath10k_tx(struct ieee80211_hw *hw,
/* we must calculate tid before we apply qos workaround
* as we'd lose the qos control field */
tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
if (ieee80211_is_mgmt(hdr->frame_control)) {
tid = HTT_DATA_TX_EXT_TID_MGMT;
} else if (ieee80211_is_data_qos(hdr->frame_control) &&
is_unicast_ether_addr(ieee80211_get_DA(hdr))) {
u8 *qc = ieee80211_get_qos_ctl(hdr);
tid = qc[0] & IEEE80211_QOS_CTL_TID_MASK;
}
tid = ath10k_tx_h_get_tid(hdr);
vdev_id = ath10k_tx_h_get_vdev_id(ar, info);
/* it makes no sense to process injected frames like that */
if (info->control.vif &&
......@@ -1757,14 +1862,14 @@ static void ath10k_tx(struct ieee80211_hw *hw,
ath10k_tx_h_seq_no(skb);
}
ATH10K_SKB_CB(skb)->vdev_id = vdev_id;
ATH10K_SKB_CB(skb)->htt.is_offchan = false;
ATH10K_SKB_CB(skb)->htt.vdev_id = vdev_id;
ATH10K_SKB_CB(skb)->htt.tid = tid;
if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
spin_lock_bh(&ar->data_lock);
ATH10K_SKB_CB(skb)->htt.is_offchan = true;
ATH10K_SKB_CB(skb)->htt.vdev_id = ar->scan.vdev_id;
ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id;
spin_unlock_bh(&ar->data_lock);
ath10k_dbg(ATH10K_DBG_MAC, "queued offchannel skb %p\n", skb);
......@@ -1786,6 +1891,7 @@ void ath10k_halt(struct ath10k *ar)
del_timer_sync(&ar->scan.timeout);
ath10k_offchan_tx_purge(ar);
ath10k_mgmt_over_wmi_tx_purge(ar);
ath10k_peer_cleanup_all(ar);
ath10k_core_stop(ar);
ath10k_hif_power_down(ar);
......@@ -1832,12 +1938,12 @@ static int ath10k_start(struct ieee80211_hw *hw)
else if (ar->state == ATH10K_STATE_RESTARTING)
ar->state = ATH10K_STATE_RESTARTED;
ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_PMF_QOS, 1);
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1);
if (ret)
ath10k_warn("could not enable WMI_PDEV_PARAM_PMF_QOS (%d)\n",
ret);
ret = ath10k_wmi_pdev_set_param(ar, WMI_PDEV_PARAM_DYNAMIC_BW, 0);
ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 0);
if (ret)
ath10k_warn("could not init WMI_PDEV_PARAM_DYNAMIC_BW (%d)\n",
ret);
......@@ -1862,32 +1968,29 @@ static void ath10k_stop(struct ieee80211_hw *hw)
ar->state = ATH10K_STATE_OFF;
mutex_unlock(&ar->conf_mutex);
ath10k_mgmt_over_wmi_tx_purge(ar);
cancel_work_sync(&ar->offchan_tx_work);
cancel_work_sync(&ar->wmi_mgmt_tx_work);
cancel_work_sync(&ar->restart_work);
}
static void ath10k_config_ps(struct ath10k *ar)
static int ath10k_config_ps(struct ath10k *ar)
{
struct ath10k_generic_iter ar_iter;
struct ath10k_vif *arvif;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
/* During HW reconfiguration mac80211 reports all interfaces that were
* running until reconfiguration was started. Since FW doesn't have any
* vdevs at this point we must not iterate over this interface list.
* This setting will be updated upon add_interface(). */
if (ar->state == ATH10K_STATE_RESTARTED)
return;
memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
ar_iter.ar = ar;
ieee80211_iterate_active_interfaces_atomic(
ar->hw, IEEE80211_IFACE_ITER_NORMAL,
ath10k_ps_iter, &ar_iter);
list_for_each_entry(arvif, &ar->arvifs, list) {
ret = ath10k_mac_vif_setup_ps(arvif);
if (ret) {
ath10k_warn("could not setup powersave (%d)\n", ret);
break;
}
}
if (ar_iter.ret)
ath10k_warn("failed to set ps config (%d)\n", ar_iter.ret);
return ret;
}
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
......@@ -1936,6 +2039,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
int ret = 0;
u32 value;
int bit;
u32 vdev_param;
mutex_lock(&ar->conf_mutex);
......@@ -1944,21 +2048,22 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->ar = ar;
arvif->vif = vif;
INIT_WORK(&arvif->wep_key_work, ath10k_tx_wep_key_work);
if ((vif->type == NL80211_IFTYPE_MONITOR) && ar->monitor_present) {
ath10k_warn("Only one monitor interface allowed\n");
ret = -EBUSY;
goto exit;
goto err;
}
bit = ffs(ar->free_vdev_map);
if (bit == 0) {
ret = -EBUSY;
goto exit;
goto err;
}
arvif->vdev_id = bit - 1;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
ar->free_vdev_map &= ~(1 << arvif->vdev_id);
if (ar->p2p)
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE;
......@@ -1994,25 +2099,34 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
arvif->vdev_subtype, vif->addr);
if (ret) {
ath10k_warn("WMI vdev create failed: ret %d\n", ret);
goto exit;
goto err;
}
ret = ath10k_wmi_vdev_set_param(ar, 0, WMI_VDEV_PARAM_DEF_KEYID,
arvif->def_wep_key_index);
if (ret)
ar->free_vdev_map &= ~BIT(arvif->vdev_id);
list_add(&arvif->list, &ar->arvifs);
vdev_param = ar->wmi.vdev_param->def_keyid;
ret = ath10k_wmi_vdev_set_param(ar, 0, vdev_param,
arvif->def_wep_key_idx);
if (ret) {
ath10k_warn("Failed to set default keyid: %d\n", ret);
goto err_vdev_delete;
}
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_TX_ENCAP_TYPE,
vdev_param = ar->wmi.vdev_param->tx_encap_type;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
ATH10K_HW_TXRX_NATIVE_WIFI);
if (ret)
/* 10.X firmware does not support this VDEV parameter. Do not warn */
if (ret && ret != -EOPNOTSUPP) {
ath10k_warn("Failed to set TX encap: %d\n", ret);
goto err_vdev_delete;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr);
if (ret) {
ath10k_warn("Failed to create peer for AP: %d\n", ret);
goto exit;
goto err_vdev_delete;
}
}
......@@ -2021,39 +2135,62 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret)
if (ret) {
ath10k_warn("Failed to set RX wake policy: %d\n", ret);
goto err_peer_delete;
}
param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret)
if (ret) {
ath10k_warn("Failed to set TX wake thresh: %d\n", ret);
goto err_peer_delete;
}
param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
param, value);
if (ret)
if (ret) {
ath10k_warn("Failed to set PSPOLL count: %d\n", ret);
goto err_peer_delete;
}
}
ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
if (ret)
if (ret) {
ath10k_warn("failed to set rts threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold);
if (ret)
if (ret) {
ath10k_warn("failed to set frag threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
goto err_peer_delete;
}
if (arvif->vdev_type == WMI_VDEV_TYPE_MONITOR)
ar->monitor_present = true;
exit:
mutex_unlock(&ar->conf_mutex);
return 0;
err_peer_delete:
if (arvif->vdev_type == WMI_VDEV_TYPE_AP)
ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->free_vdev_map &= ~BIT(arvif->vdev_id);
list_del(&arvif->list);
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
......@@ -2066,6 +2203,8 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
cancel_work_sync(&arvif->wep_key_work);
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
dev_kfree_skb_any(arvif->beacon);
......@@ -2074,6 +2213,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
spin_unlock_bh(&ar->data_lock);
ar->free_vdev_map |= 1 << (arvif->vdev_id);
list_del(&arvif->list);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id, vif->addr);
......@@ -2154,6 +2294,7 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
struct ath10k *ar = hw->priv;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret = 0;
u32 vdev_param, pdev_param;
mutex_lock(&ar->conf_mutex);
......@@ -2162,8 +2303,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
if (changed & BSS_CHANGED_BEACON_INT) {
arvif->beacon_interval = info->beacon_int;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_BEACON_INTERVAL,
vdev_param = ar->wmi.vdev_param->beacon_interval;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->beacon_interval);
ath10k_dbg(ATH10K_DBG_MAC,
"mac vdev %d beacon_interval %d\n",
......@@ -2179,8 +2320,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
"vdev %d set beacon tx mode to staggered\n",
arvif->vdev_id);
ret = ath10k_wmi_pdev_set_param(ar,
WMI_PDEV_PARAM_BEACON_TX_MODE,
pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
WMI_BEACON_STAGGERED_MODE);
if (ret)
ath10k_warn("Failed to set beacon mode for VDEV: %d\n",
......@@ -2194,8 +2335,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
"mac vdev %d dtim_period %d\n",
arvif->vdev_id, arvif->dtim_period);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_DTIM_PERIOD,
vdev_param = ar->wmi.vdev_param->dtim_period;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
arvif->dtim_period);
if (ret)
ath10k_warn("Failed to set dtim period for VDEV: %d\n",
......@@ -2262,8 +2403,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
arvif->vdev_id, cts_prot);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_ENABLE_RTSCTS,
vdev_param = ar->wmi.vdev_param->enable_rtscts;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
cts_prot);
if (ret)
ath10k_warn("Failed to set CTS prot for VDEV: %d\n",
......@@ -2281,8 +2422,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
arvif->vdev_id, slottime);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_SLOT_TIME,
vdev_param = ar->wmi.vdev_param->slot_time;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
slottime);
if (ret)
ath10k_warn("Failed to set erp slot for VDEV: %d\n",
......@@ -2300,8 +2441,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
"mac vdev %d preamble %dn",
arvif->vdev_id, preamble);
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
WMI_VDEV_PARAM_PREAMBLE,
vdev_param = ar->wmi.vdev_param->preamble;
ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
preamble);
if (ret)
ath10k_warn("Failed to set preamble for VDEV: %d\n",
......@@ -2751,86 +2892,51 @@ static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
* Both RTS and Fragmentation threshold are interface-specific
* in ath10k, but device-specific in mac80211.
*/
static void ath10k_set_rts_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath10k_generic_iter *ar_iter = data;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 rts = ar_iter->ar->hw->wiphy->rts_threshold;
lockdep_assert_held(&arvif->ar->conf_mutex);
/* During HW reconfiguration mac80211 reports all interfaces that were
* running until reconfiguration was started. Since FW doesn't have any
* vdevs at this point we must not iterate over this interface list.
* This setting will be updated upon add_interface(). */
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts_threshold %d\n",
arvif->vdev_id, rts);
ar_iter->ret = ath10k_mac_set_rts(arvif, rts);
if (ar_iter->ret)
ath10k_warn("Failed to set RTS threshold for VDEV: %d\n",
arvif->vdev_id);
}
static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
ar_iter.ar = ar;
struct ath10k_vif *arvif;
int ret = 0;
mutex_lock(&ar->conf_mutex);
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
ath10k_set_rts_iter, &ar_iter);
mutex_unlock(&ar->conf_mutex);
return ar_iter.ret;
}
list_for_each_entry(arvif, &ar->arvifs, list) {
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
arvif->vdev_id, value);
static void ath10k_set_frag_iter(void *data, u8 *mac, struct ieee80211_vif *vif)
{
struct ath10k_generic_iter *ar_iter = data;
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
u32 frag = ar_iter->ar->hw->wiphy->frag_threshold;
lockdep_assert_held(&arvif->ar->conf_mutex);
/* During HW reconfiguration mac80211 reports all interfaces that were
* running until reconfiguration was started. Since FW doesn't have any
* vdevs at this point we must not iterate over this interface list.
* This setting will be updated upon add_interface(). */
if (ar_iter->ar->state == ATH10K_STATE_RESTARTED)
return;
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation_threshold %d\n",
arvif->vdev_id, frag);
ret = ath10k_mac_set_rts(arvif, value);
if (ret) {
ath10k_warn("could not set rts threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
break;
}
}
mutex_unlock(&ar->conf_mutex);
ar_iter->ret = ath10k_mac_set_frag(arvif, frag);
if (ar_iter->ret)
ath10k_warn("Failed to set frag threshold for VDEV: %d\n",
arvif->vdev_id);
return ret;
}
static int ath10k_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
struct ath10k_generic_iter ar_iter;
struct ath10k *ar = hw->priv;
memset(&ar_iter, 0, sizeof(struct ath10k_generic_iter));
ar_iter.ar = ar;
struct ath10k_vif *arvif;
int ret = 0;
mutex_lock(&ar->conf_mutex);
ieee80211_iterate_active_interfaces_atomic(
hw, IEEE80211_IFACE_ITER_NORMAL,
ath10k_set_frag_iter, &ar_iter);
list_for_each_entry(arvif, &ar->arvifs, list) {
ath10k_dbg(ATH10K_DBG_MAC, "mac vdev %d fragmentation threshold %d\n",
arvif->vdev_id, value);
ret = ath10k_mac_set_frag(arvif, value);
if (ret) {
ath10k_warn("could not set fragmentation threshold for vdev %d (%d)\n",
arvif->vdev_id, ret);
break;
}
}
mutex_unlock(&ar->conf_mutex);
return ar_iter.ret;
return ret;
}
static void ath10k_flush(struct ieee80211_hw *hw, u32 queues, bool drop)
......
......@@ -34,6 +34,8 @@ struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id);
void ath10k_reset_scan(unsigned long ptr);
void ath10k_offchan_tx_purge(struct ath10k *ar);
void ath10k_offchan_tx_work(struct work_struct *work);
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
......
......@@ -720,18 +720,8 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
"ath10k tx: data: ",
nbuf->data, nbuf->len);
/* Make sure we have resources to handle this request */
spin_lock_bh(&pipe_info->pipe_lock);
if (!pipe_info->num_sends_allowed) {
ath10k_warn("Pipe: %d is full\n", pipe_id);
spin_unlock_bh(&pipe_info->pipe_lock);
return -ENOSR;
}
pipe_info->num_sends_allowed--;
spin_unlock_bh(&pipe_info->pipe_lock);
ret = ath10k_ce_sendlist_send(ce_hdl, nbuf, transfer_id,
skb_cb->paddr, len, flags);
ret = ath10k_ce_send(ce_hdl, nbuf, skb_cb->paddr, len, transfer_id,
flags);
if (ret)
ath10k_warn("CE send failed: %p\n", nbuf);
......@@ -741,14 +731,7 @@ static int ath10k_pci_hif_send_head(struct ath10k *ar, u8 pipe_id,
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info = &(ar_pci->pipe_info[pipe]);
int ret;
spin_lock_bh(&pipe_info->pipe_lock);
ret = pipe_info->num_sends_allowed;
spin_unlock_bh(&pipe_info->pipe_lock);
return ret;
return ath10k_ce_num_free_src_entries(ar_pci->pipe_info[pipe].ce_hdl);
}
static void ath10k_pci_hif_dump_area(struct ath10k *ar)
......@@ -863,7 +846,6 @@ static int ath10k_pci_start_ce(struct ath10k *ar)
ath10k_pci_ce_send_done,
disable_interrupts);
completions += attr->src_nentries;
pipe_info->num_sends_allowed = attr->src_nentries - 1;
}
if (attr->dest_nentries) {
......@@ -1033,7 +1015,6 @@ static void ath10k_pci_process_ce(struct ath10k *ar)
*/
spin_lock_bh(&compl->pipe_info->pipe_lock);
list_add_tail(&compl->list, &compl->pipe_info->compl_free);
compl->pipe_info->num_sends_allowed += send_done;
spin_unlock_bh(&compl->pipe_info->pipe_lock);
}
......
......@@ -178,9 +178,6 @@ struct ath10k_pci_pipe {
/* List of free CE completion slots */
struct list_head compl_free;
/* Limit the number of outstanding send requests. */
int num_sends_allowed;
struct ath10k_pci *ar_pci;
struct tasklet_struct intr;
};
......
......@@ -23,6 +23,471 @@
#include "wmi.h"
#include "mac.h"
/* MAIN WMI cmd track */
static struct wmi_cmd_map wmi_cmd_map = {
.init_cmdid = WMI_INIT_CMDID,
.start_scan_cmdid = WMI_START_SCAN_CMDID,
.stop_scan_cmdid = WMI_STOP_SCAN_CMDID,
.scan_chan_list_cmdid = WMI_SCAN_CHAN_LIST_CMDID,
.scan_sch_prio_tbl_cmdid = WMI_SCAN_SCH_PRIO_TBL_CMDID,
.pdev_set_regdomain_cmdid = WMI_PDEV_SET_REGDOMAIN_CMDID,
.pdev_set_channel_cmdid = WMI_PDEV_SET_CHANNEL_CMDID,
.pdev_set_param_cmdid = WMI_PDEV_SET_PARAM_CMDID,
.pdev_pktlog_enable_cmdid = WMI_PDEV_PKTLOG_ENABLE_CMDID,
.pdev_pktlog_disable_cmdid = WMI_PDEV_PKTLOG_DISABLE_CMDID,
.pdev_set_wmm_params_cmdid = WMI_PDEV_SET_WMM_PARAMS_CMDID,
.pdev_set_ht_cap_ie_cmdid = WMI_PDEV_SET_HT_CAP_IE_CMDID,
.pdev_set_vht_cap_ie_cmdid = WMI_PDEV_SET_VHT_CAP_IE_CMDID,
.pdev_set_dscp_tid_map_cmdid = WMI_PDEV_SET_DSCP_TID_MAP_CMDID,
.pdev_set_quiet_mode_cmdid = WMI_PDEV_SET_QUIET_MODE_CMDID,
.pdev_green_ap_ps_enable_cmdid = WMI_PDEV_GREEN_AP_PS_ENABLE_CMDID,
.pdev_get_tpc_config_cmdid = WMI_PDEV_GET_TPC_CONFIG_CMDID,
.pdev_set_base_macaddr_cmdid = WMI_PDEV_SET_BASE_MACADDR_CMDID,
.vdev_create_cmdid = WMI_VDEV_CREATE_CMDID,
.vdev_delete_cmdid = WMI_VDEV_DELETE_CMDID,
.vdev_start_request_cmdid = WMI_VDEV_START_REQUEST_CMDID,
.vdev_restart_request_cmdid = WMI_VDEV_RESTART_REQUEST_CMDID,
.vdev_up_cmdid = WMI_VDEV_UP_CMDID,
.vdev_stop_cmdid = WMI_VDEV_STOP_CMDID,
.vdev_down_cmdid = WMI_VDEV_DOWN_CMDID,
.vdev_set_param_cmdid = WMI_VDEV_SET_PARAM_CMDID,
.vdev_install_key_cmdid = WMI_VDEV_INSTALL_KEY_CMDID,
.peer_create_cmdid = WMI_PEER_CREATE_CMDID,
.peer_delete_cmdid = WMI_PEER_DELETE_CMDID,
.peer_flush_tids_cmdid = WMI_PEER_FLUSH_TIDS_CMDID,
.peer_set_param_cmdid = WMI_PEER_SET_PARAM_CMDID,
.peer_assoc_cmdid = WMI_PEER_ASSOC_CMDID,
.peer_add_wds_entry_cmdid = WMI_PEER_ADD_WDS_ENTRY_CMDID,
.peer_remove_wds_entry_cmdid = WMI_PEER_REMOVE_WDS_ENTRY_CMDID,
.peer_mcast_group_cmdid = WMI_PEER_MCAST_GROUP_CMDID,
.bcn_tx_cmdid = WMI_BCN_TX_CMDID,
.pdev_send_bcn_cmdid = WMI_PDEV_SEND_BCN_CMDID,
.bcn_tmpl_cmdid = WMI_BCN_TMPL_CMDID,
.bcn_filter_rx_cmdid = WMI_BCN_FILTER_RX_CMDID,
.prb_req_filter_rx_cmdid = WMI_PRB_REQ_FILTER_RX_CMDID,
.mgmt_tx_cmdid = WMI_MGMT_TX_CMDID,
.prb_tmpl_cmdid = WMI_PRB_TMPL_CMDID,
.addba_clear_resp_cmdid = WMI_ADDBA_CLEAR_RESP_CMDID,
.addba_send_cmdid = WMI_ADDBA_SEND_CMDID,
.addba_status_cmdid = WMI_ADDBA_STATUS_CMDID,
.delba_send_cmdid = WMI_DELBA_SEND_CMDID,
.addba_set_resp_cmdid = WMI_ADDBA_SET_RESP_CMDID,
.send_singleamsdu_cmdid = WMI_SEND_SINGLEAMSDU_CMDID,
.sta_powersave_mode_cmdid = WMI_STA_POWERSAVE_MODE_CMDID,
.sta_powersave_param_cmdid = WMI_STA_POWERSAVE_PARAM_CMDID,
.sta_mimo_ps_mode_cmdid = WMI_STA_MIMO_PS_MODE_CMDID,
.pdev_dfs_enable_cmdid = WMI_PDEV_DFS_ENABLE_CMDID,
.pdev_dfs_disable_cmdid = WMI_PDEV_DFS_DISABLE_CMDID,
.roam_scan_mode = WMI_ROAM_SCAN_MODE,
.roam_scan_rssi_threshold = WMI_ROAM_SCAN_RSSI_THRESHOLD,
.roam_scan_period = WMI_ROAM_SCAN_PERIOD,
.roam_scan_rssi_change_threshold = WMI_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
.roam_ap_profile = WMI_ROAM_AP_PROFILE,
.ofl_scan_add_ap_profile = WMI_ROAM_AP_PROFILE,
.ofl_scan_remove_ap_profile = WMI_OFL_SCAN_REMOVE_AP_PROFILE,
.ofl_scan_period = WMI_OFL_SCAN_PERIOD,
.p2p_dev_set_device_info = WMI_P2P_DEV_SET_DEVICE_INFO,
.p2p_dev_set_discoverability = WMI_P2P_DEV_SET_DISCOVERABILITY,
.p2p_go_set_beacon_ie = WMI_P2P_GO_SET_BEACON_IE,
.p2p_go_set_probe_resp_ie = WMI_P2P_GO_SET_PROBE_RESP_IE,
.p2p_set_vendor_ie_data_cmdid = WMI_P2P_SET_VENDOR_IE_DATA_CMDID,
.ap_ps_peer_param_cmdid = WMI_AP_PS_PEER_PARAM_CMDID,
.ap_ps_peer_uapsd_coex_cmdid = WMI_AP_PS_PEER_UAPSD_COEX_CMDID,
.peer_rate_retry_sched_cmdid = WMI_PEER_RATE_RETRY_SCHED_CMDID,
.wlan_profile_trigger_cmdid = WMI_WLAN_PROFILE_TRIGGER_CMDID,
.wlan_profile_set_hist_intvl_cmdid =
WMI_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
.wlan_profile_get_profile_data_cmdid =
WMI_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
.wlan_profile_enable_profile_id_cmdid =
WMI_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
.wlan_profile_list_profile_id_cmdid =
WMI_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
.pdev_suspend_cmdid = WMI_PDEV_SUSPEND_CMDID,
.pdev_resume_cmdid = WMI_PDEV_RESUME_CMDID,
.add_bcn_filter_cmdid = WMI_ADD_BCN_FILTER_CMDID,
.rmv_bcn_filter_cmdid = WMI_RMV_BCN_FILTER_CMDID,
.wow_add_wake_pattern_cmdid = WMI_WOW_ADD_WAKE_PATTERN_CMDID,
.wow_del_wake_pattern_cmdid = WMI_WOW_DEL_WAKE_PATTERN_CMDID,
.wow_enable_disable_wake_event_cmdid =
WMI_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
.wow_enable_cmdid = WMI_WOW_ENABLE_CMDID,
.wow_hostwakeup_from_sleep_cmdid = WMI_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
.rtt_measreq_cmdid = WMI_RTT_MEASREQ_CMDID,
.rtt_tsf_cmdid = WMI_RTT_TSF_CMDID,
.vdev_spectral_scan_configure_cmdid =
WMI_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
.vdev_spectral_scan_enable_cmdid = WMI_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
.request_stats_cmdid = WMI_REQUEST_STATS_CMDID,
.set_arp_ns_offload_cmdid = WMI_SET_ARP_NS_OFFLOAD_CMDID,
.network_list_offload_config_cmdid =
WMI_NETWORK_LIST_OFFLOAD_CONFIG_CMDID,
.gtk_offload_cmdid = WMI_GTK_OFFLOAD_CMDID,
.csa_offload_enable_cmdid = WMI_CSA_OFFLOAD_ENABLE_CMDID,
.csa_offload_chanswitch_cmdid = WMI_CSA_OFFLOAD_CHANSWITCH_CMDID,
.chatter_set_mode_cmdid = WMI_CHATTER_SET_MODE_CMDID,
.peer_tid_addba_cmdid = WMI_PEER_TID_ADDBA_CMDID,
.peer_tid_delba_cmdid = WMI_PEER_TID_DELBA_CMDID,
.sta_dtim_ps_method_cmdid = WMI_STA_DTIM_PS_METHOD_CMDID,
.sta_uapsd_auto_trig_cmdid = WMI_STA_UAPSD_AUTO_TRIG_CMDID,
.sta_keepalive_cmd = WMI_STA_KEEPALIVE_CMD,
.echo_cmdid = WMI_ECHO_CMDID,
.pdev_utf_cmdid = WMI_PDEV_UTF_CMDID,
.dbglog_cfg_cmdid = WMI_DBGLOG_CFG_CMDID,
.pdev_qvit_cmdid = WMI_PDEV_QVIT_CMDID,
.pdev_ftm_intg_cmdid = WMI_PDEV_FTM_INTG_CMDID,
.vdev_set_keepalive_cmdid = WMI_VDEV_SET_KEEPALIVE_CMDID,
.vdev_get_keepalive_cmdid = WMI_VDEV_GET_KEEPALIVE_CMDID,
.force_fw_hang_cmdid = WMI_FORCE_FW_HANG_CMDID,
.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
};
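The point of the map is that WMI callers stop hardcoding WMI_*_CMDID values and go through ar->wmi.cmd instead, so the same call site works for both MAIN and 10.X firmware. A hedged sketch of that dispatch pattern (command payload filling elided; ath10k_wmi_alloc_skb() and ath10k_wmi_cmd_send() are the existing wmi.c helpers):

/* Illustrative only: send a vdev-set-param command through the abstract map.
 * Commands the running firmware lacks are mapped to WMI_CMD_UNSUPPORTED and
 * can be rejected before anything is queued. */
static int ath10k_wmi_vdev_set_param_sketch(struct ath10k *ar, u32 vdev_id,
                                            u32 param_id, u32 value)
{
        struct sk_buff *skb;
        u32 cmd_id = ar->wmi.cmd->vdev_set_param_cmdid;

        if (cmd_id == WMI_CMD_UNSUPPORTED)
                return -EOPNOTSUPP;

        skb = ath10k_wmi_alloc_skb(sizeof(struct wmi_vdev_set_param_cmd));
        if (!skb)
                return -ENOMEM;

        /* ... fill in vdev_id, param_id, value (little-endian) ... */

        return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}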
/* 10.X WMI cmd track */
static struct wmi_cmd_map wmi_10x_cmd_map = {
.init_cmdid = WMI_10X_INIT_CMDID,
.start_scan_cmdid = WMI_10X_START_SCAN_CMDID,
.stop_scan_cmdid = WMI_10X_STOP_SCAN_CMDID,
.scan_chan_list_cmdid = WMI_10X_SCAN_CHAN_LIST_CMDID,
.scan_sch_prio_tbl_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_set_regdomain_cmdid = WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
.pdev_set_channel_cmdid = WMI_10X_PDEV_SET_CHANNEL_CMDID,
.pdev_set_param_cmdid = WMI_10X_PDEV_SET_PARAM_CMDID,
.pdev_pktlog_enable_cmdid = WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
.pdev_pktlog_disable_cmdid = WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
.pdev_set_wmm_params_cmdid = WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
.pdev_set_ht_cap_ie_cmdid = WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
.pdev_set_vht_cap_ie_cmdid = WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
.pdev_set_dscp_tid_map_cmdid = WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
.pdev_set_quiet_mode_cmdid = WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
.pdev_green_ap_ps_enable_cmdid = WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
.pdev_get_tpc_config_cmdid = WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
.pdev_set_base_macaddr_cmdid = WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
.vdev_create_cmdid = WMI_10X_VDEV_CREATE_CMDID,
.vdev_delete_cmdid = WMI_10X_VDEV_DELETE_CMDID,
.vdev_start_request_cmdid = WMI_10X_VDEV_START_REQUEST_CMDID,
.vdev_restart_request_cmdid = WMI_10X_VDEV_RESTART_REQUEST_CMDID,
.vdev_up_cmdid = WMI_10X_VDEV_UP_CMDID,
.vdev_stop_cmdid = WMI_10X_VDEV_STOP_CMDID,
.vdev_down_cmdid = WMI_10X_VDEV_DOWN_CMDID,
.vdev_set_param_cmdid = WMI_10X_VDEV_SET_PARAM_CMDID,
.vdev_install_key_cmdid = WMI_10X_VDEV_INSTALL_KEY_CMDID,
.peer_create_cmdid = WMI_10X_PEER_CREATE_CMDID,
.peer_delete_cmdid = WMI_10X_PEER_DELETE_CMDID,
.peer_flush_tids_cmdid = WMI_10X_PEER_FLUSH_TIDS_CMDID,
.peer_set_param_cmdid = WMI_10X_PEER_SET_PARAM_CMDID,
.peer_assoc_cmdid = WMI_10X_PEER_ASSOC_CMDID,
.peer_add_wds_entry_cmdid = WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
.peer_remove_wds_entry_cmdid = WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
.peer_mcast_group_cmdid = WMI_10X_PEER_MCAST_GROUP_CMDID,
.bcn_tx_cmdid = WMI_10X_BCN_TX_CMDID,
.pdev_send_bcn_cmdid = WMI_10X_PDEV_SEND_BCN_CMDID,
.bcn_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
.bcn_filter_rx_cmdid = WMI_10X_BCN_FILTER_RX_CMDID,
.prb_req_filter_rx_cmdid = WMI_10X_PRB_REQ_FILTER_RX_CMDID,
.mgmt_tx_cmdid = WMI_10X_MGMT_TX_CMDID,
.prb_tmpl_cmdid = WMI_CMD_UNSUPPORTED,
.addba_clear_resp_cmdid = WMI_10X_ADDBA_CLEAR_RESP_CMDID,
.addba_send_cmdid = WMI_10X_ADDBA_SEND_CMDID,
.addba_status_cmdid = WMI_10X_ADDBA_STATUS_CMDID,
.delba_send_cmdid = WMI_10X_DELBA_SEND_CMDID,
.addba_set_resp_cmdid = WMI_10X_ADDBA_SET_RESP_CMDID,
.send_singleamsdu_cmdid = WMI_10X_SEND_SINGLEAMSDU_CMDID,
.sta_powersave_mode_cmdid = WMI_10X_STA_POWERSAVE_MODE_CMDID,
.sta_powersave_param_cmdid = WMI_10X_STA_POWERSAVE_PARAM_CMDID,
.sta_mimo_ps_mode_cmdid = WMI_10X_STA_MIMO_PS_MODE_CMDID,
.pdev_dfs_enable_cmdid = WMI_10X_PDEV_DFS_ENABLE_CMDID,
.pdev_dfs_disable_cmdid = WMI_10X_PDEV_DFS_DISABLE_CMDID,
.roam_scan_mode = WMI_10X_ROAM_SCAN_MODE,
.roam_scan_rssi_threshold = WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
.roam_scan_period = WMI_10X_ROAM_SCAN_PERIOD,
.roam_scan_rssi_change_threshold =
WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
.roam_ap_profile = WMI_10X_ROAM_AP_PROFILE,
.ofl_scan_add_ap_profile = WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
.ofl_scan_remove_ap_profile = WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
.ofl_scan_period = WMI_10X_OFL_SCAN_PERIOD,
.p2p_dev_set_device_info = WMI_10X_P2P_DEV_SET_DEVICE_INFO,
.p2p_dev_set_discoverability = WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
.p2p_go_set_beacon_ie = WMI_10X_P2P_GO_SET_BEACON_IE,
.p2p_go_set_probe_resp_ie = WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
.p2p_set_vendor_ie_data_cmdid = WMI_CMD_UNSUPPORTED,
.ap_ps_peer_param_cmdid = WMI_CMD_UNSUPPORTED,
.ap_ps_peer_uapsd_coex_cmdid = WMI_CMD_UNSUPPORTED,
.peer_rate_retry_sched_cmdid = WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
.wlan_profile_trigger_cmdid = WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
.wlan_profile_set_hist_intvl_cmdid =
WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
.wlan_profile_get_profile_data_cmdid =
WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
.wlan_profile_enable_profile_id_cmdid =
WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
.wlan_profile_list_profile_id_cmdid =
WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
.pdev_suspend_cmdid = WMI_10X_PDEV_SUSPEND_CMDID,
.pdev_resume_cmdid = WMI_10X_PDEV_RESUME_CMDID,
.add_bcn_filter_cmdid = WMI_10X_ADD_BCN_FILTER_CMDID,
.rmv_bcn_filter_cmdid = WMI_10X_RMV_BCN_FILTER_CMDID,
.wow_add_wake_pattern_cmdid = WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
.wow_del_wake_pattern_cmdid = WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
.wow_enable_disable_wake_event_cmdid =
WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
.wow_enable_cmdid = WMI_10X_WOW_ENABLE_CMDID,
.wow_hostwakeup_from_sleep_cmdid =
WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
.rtt_measreq_cmdid = WMI_10X_RTT_MEASREQ_CMDID,
.rtt_tsf_cmdid = WMI_10X_RTT_TSF_CMDID,
.vdev_spectral_scan_configure_cmdid =
WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
.vdev_spectral_scan_enable_cmdid =
WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
.request_stats_cmdid = WMI_10X_REQUEST_STATS_CMDID,
.set_arp_ns_offload_cmdid = WMI_CMD_UNSUPPORTED,
.network_list_offload_config_cmdid = WMI_CMD_UNSUPPORTED,
.gtk_offload_cmdid = WMI_CMD_UNSUPPORTED,
.csa_offload_enable_cmdid = WMI_CMD_UNSUPPORTED,
.csa_offload_chanswitch_cmdid = WMI_CMD_UNSUPPORTED,
.chatter_set_mode_cmdid = WMI_CMD_UNSUPPORTED,
.peer_tid_addba_cmdid = WMI_CMD_UNSUPPORTED,
.peer_tid_delba_cmdid = WMI_CMD_UNSUPPORTED,
.sta_dtim_ps_method_cmdid = WMI_CMD_UNSUPPORTED,
.sta_uapsd_auto_trig_cmdid = WMI_CMD_UNSUPPORTED,
.sta_keepalive_cmd = WMI_CMD_UNSUPPORTED,
.echo_cmdid = WMI_10X_ECHO_CMDID,
.pdev_utf_cmdid = WMI_10X_PDEV_UTF_CMDID,
.dbglog_cfg_cmdid = WMI_10X_DBGLOG_CFG_CMDID,
.pdev_qvit_cmdid = WMI_10X_PDEV_QVIT_CMDID,
.pdev_ftm_intg_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_set_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_get_keepalive_cmdid = WMI_CMD_UNSUPPORTED,
.force_fw_hang_cmdid = WMI_CMD_UNSUPPORTED,
.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
};
/* MAIN WMI VDEV param map */
static struct wmi_vdev_param_map wmi_vdev_param_map = {
.rts_threshold = WMI_VDEV_PARAM_RTS_THRESHOLD,
.fragmentation_threshold = WMI_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
.beacon_interval = WMI_VDEV_PARAM_BEACON_INTERVAL,
.listen_interval = WMI_VDEV_PARAM_LISTEN_INTERVAL,
.multicast_rate = WMI_VDEV_PARAM_MULTICAST_RATE,
.mgmt_tx_rate = WMI_VDEV_PARAM_MGMT_TX_RATE,
.slot_time = WMI_VDEV_PARAM_SLOT_TIME,
.preamble = WMI_VDEV_PARAM_PREAMBLE,
.swba_time = WMI_VDEV_PARAM_SWBA_TIME,
.wmi_vdev_stats_update_period = WMI_VDEV_STATS_UPDATE_PERIOD,
.wmi_vdev_pwrsave_ageout_time = WMI_VDEV_PWRSAVE_AGEOUT_TIME,
.wmi_vdev_host_swba_interval = WMI_VDEV_HOST_SWBA_INTERVAL,
.dtim_period = WMI_VDEV_PARAM_DTIM_PERIOD,
.wmi_vdev_oc_scheduler_air_time_limit =
WMI_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
.wds = WMI_VDEV_PARAM_WDS,
.atim_window = WMI_VDEV_PARAM_ATIM_WINDOW,
.bmiss_count_max = WMI_VDEV_PARAM_BMISS_COUNT_MAX,
.bmiss_first_bcnt = WMI_VDEV_PARAM_BMISS_FIRST_BCNT,
.bmiss_final_bcnt = WMI_VDEV_PARAM_BMISS_FINAL_BCNT,
.feature_wmm = WMI_VDEV_PARAM_FEATURE_WMM,
.chwidth = WMI_VDEV_PARAM_CHWIDTH,
.chextoffset = WMI_VDEV_PARAM_CHEXTOFFSET,
.disable_htprotection = WMI_VDEV_PARAM_DISABLE_HTPROTECTION,
.sta_quickkickout = WMI_VDEV_PARAM_STA_QUICKKICKOUT,
.mgmt_rate = WMI_VDEV_PARAM_MGMT_RATE,
.protection_mode = WMI_VDEV_PARAM_PROTECTION_MODE,
.fixed_rate = WMI_VDEV_PARAM_FIXED_RATE,
.sgi = WMI_VDEV_PARAM_SGI,
.ldpc = WMI_VDEV_PARAM_LDPC,
.tx_stbc = WMI_VDEV_PARAM_TX_STBC,
.rx_stbc = WMI_VDEV_PARAM_RX_STBC,
.intra_bss_fwd = WMI_VDEV_PARAM_INTRA_BSS_FWD,
.def_keyid = WMI_VDEV_PARAM_DEF_KEYID,
.nss = WMI_VDEV_PARAM_NSS,
.bcast_data_rate = WMI_VDEV_PARAM_BCAST_DATA_RATE,
.mcast_data_rate = WMI_VDEV_PARAM_MCAST_DATA_RATE,
.mcast_indicate = WMI_VDEV_PARAM_MCAST_INDICATE,
.dhcp_indicate = WMI_VDEV_PARAM_DHCP_INDICATE,
.unknown_dest_indicate = WMI_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
.ap_keepalive_min_idle_inactive_time_secs =
WMI_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
.ap_keepalive_max_idle_inactive_time_secs =
WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
.ap_keepalive_max_unresponsive_time_secs =
WMI_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
.ap_enable_nawds = WMI_VDEV_PARAM_AP_ENABLE_NAWDS,
.mcast2ucast_set = WMI_VDEV_PARAM_UNSUPPORTED,
.enable_rtscts = WMI_VDEV_PARAM_ENABLE_RTSCTS,
.txbf = WMI_VDEV_PARAM_TXBF,
.packet_powersave = WMI_VDEV_PARAM_PACKET_POWERSAVE,
.drop_unencry = WMI_VDEV_PARAM_DROP_UNENCRY,
.tx_encap_type = WMI_VDEV_PARAM_TX_ENCAP_TYPE,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_VDEV_PARAM_UNSUPPORTED,
};
/* 10.X WMI VDEV param map */
static struct wmi_vdev_param_map wmi_10x_vdev_param_map = {
.rts_threshold = WMI_10X_VDEV_PARAM_RTS_THRESHOLD,
.fragmentation_threshold = WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
.beacon_interval = WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
.listen_interval = WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
.multicast_rate = WMI_10X_VDEV_PARAM_MULTICAST_RATE,
.mgmt_tx_rate = WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
.slot_time = WMI_10X_VDEV_PARAM_SLOT_TIME,
.preamble = WMI_10X_VDEV_PARAM_PREAMBLE,
.swba_time = WMI_10X_VDEV_PARAM_SWBA_TIME,
.wmi_vdev_stats_update_period = WMI_10X_VDEV_STATS_UPDATE_PERIOD,
.wmi_vdev_pwrsave_ageout_time = WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
.wmi_vdev_host_swba_interval = WMI_10X_VDEV_HOST_SWBA_INTERVAL,
.dtim_period = WMI_10X_VDEV_PARAM_DTIM_PERIOD,
.wmi_vdev_oc_scheduler_air_time_limit =
WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
.wds = WMI_10X_VDEV_PARAM_WDS,
.atim_window = WMI_10X_VDEV_PARAM_ATIM_WINDOW,
.bmiss_count_max = WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
.bmiss_first_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
.bmiss_final_bcnt = WMI_VDEV_PARAM_UNSUPPORTED,
.feature_wmm = WMI_10X_VDEV_PARAM_FEATURE_WMM,
.chwidth = WMI_10X_VDEV_PARAM_CHWIDTH,
.chextoffset = WMI_10X_VDEV_PARAM_CHEXTOFFSET,
.disable_htprotection = WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
.sta_quickkickout = WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
.mgmt_rate = WMI_10X_VDEV_PARAM_MGMT_RATE,
.protection_mode = WMI_10X_VDEV_PARAM_PROTECTION_MODE,
.fixed_rate = WMI_10X_VDEV_PARAM_FIXED_RATE,
.sgi = WMI_10X_VDEV_PARAM_SGI,
.ldpc = WMI_10X_VDEV_PARAM_LDPC,
.tx_stbc = WMI_10X_VDEV_PARAM_TX_STBC,
.rx_stbc = WMI_10X_VDEV_PARAM_RX_STBC,
.intra_bss_fwd = WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
.def_keyid = WMI_10X_VDEV_PARAM_DEF_KEYID,
.nss = WMI_10X_VDEV_PARAM_NSS,
.bcast_data_rate = WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
.mcast_data_rate = WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
.mcast_indicate = WMI_10X_VDEV_PARAM_MCAST_INDICATE,
.dhcp_indicate = WMI_10X_VDEV_PARAM_DHCP_INDICATE,
.unknown_dest_indicate = WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
.ap_keepalive_min_idle_inactive_time_secs =
WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
.ap_keepalive_max_idle_inactive_time_secs =
WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
.ap_keepalive_max_unresponsive_time_secs =
WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
.ap_enable_nawds = WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
.mcast2ucast_set = WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
.enable_rtscts = WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
.txbf = WMI_VDEV_PARAM_UNSUPPORTED,
.packet_powersave = WMI_VDEV_PARAM_UNSUPPORTED,
.drop_unencry = WMI_VDEV_PARAM_UNSUPPORTED,
.tx_encap_type = WMI_VDEV_PARAM_UNSUPPORTED,
.ap_detect_out_of_sync_sleeping_sta_time_secs =
WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
static struct wmi_pdev_param_map wmi_pdev_param_map = {
.tx_chain_mask = WMI_PDEV_PARAM_TX_CHAIN_MASK,
.rx_chain_mask = WMI_PDEV_PARAM_RX_CHAIN_MASK,
.txpower_limit2g = WMI_PDEV_PARAM_TXPOWER_LIMIT2G,
.txpower_limit5g = WMI_PDEV_PARAM_TXPOWER_LIMIT5G,
.txpower_scale = WMI_PDEV_PARAM_TXPOWER_SCALE,
.beacon_gen_mode = WMI_PDEV_PARAM_BEACON_GEN_MODE,
.beacon_tx_mode = WMI_PDEV_PARAM_BEACON_TX_MODE,
.resmgr_offchan_mode = WMI_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
.protection_mode = WMI_PDEV_PARAM_PROTECTION_MODE,
.dynamic_bw = WMI_PDEV_PARAM_DYNAMIC_BW,
.non_agg_sw_retry_th = WMI_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
.agg_sw_retry_th = WMI_PDEV_PARAM_AGG_SW_RETRY_TH,
.sta_kickout_th = WMI_PDEV_PARAM_STA_KICKOUT_TH,
.ac_aggrsize_scaling = WMI_PDEV_PARAM_AC_AGGRSIZE_SCALING,
.ltr_enable = WMI_PDEV_PARAM_LTR_ENABLE,
.ltr_ac_latency_be = WMI_PDEV_PARAM_LTR_AC_LATENCY_BE,
.ltr_ac_latency_bk = WMI_PDEV_PARAM_LTR_AC_LATENCY_BK,
.ltr_ac_latency_vi = WMI_PDEV_PARAM_LTR_AC_LATENCY_VI,
.ltr_ac_latency_vo = WMI_PDEV_PARAM_LTR_AC_LATENCY_VO,
.ltr_ac_latency_timeout = WMI_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
.ltr_sleep_override = WMI_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
.ltr_rx_override = WMI_PDEV_PARAM_LTR_RX_OVERRIDE,
.ltr_tx_activity_timeout = WMI_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
.l1ss_enable = WMI_PDEV_PARAM_L1SS_ENABLE,
.dsleep_enable = WMI_PDEV_PARAM_DSLEEP_ENABLE,
.pcielp_txbuf_flush = WMI_PDEV_PARAM_PCIELP_TXBUF_FLUSH,
.pcielp_txbuf_watermark = WMI_PDEV_PARAM_PCIELP_TXBUF_WATERMARK,
.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_EN,
.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_PCIELP_TXBUF_TMO_VALUE,
.pdev_stats_update_period = WMI_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
.vdev_stats_update_period = WMI_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
.peer_stats_update_period = WMI_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
.bcnflt_stats_update_period = WMI_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
.pmf_qos = WMI_PDEV_PARAM_PMF_QOS,
.arp_ac_override = WMI_PDEV_PARAM_ARP_AC_OVERRIDE,
.arpdhcp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
.dcs = WMI_PDEV_PARAM_DCS,
.ani_enable = WMI_PDEV_PARAM_ANI_ENABLE,
.ani_poll_period = WMI_PDEV_PARAM_ANI_POLL_PERIOD,
.ani_listen_period = WMI_PDEV_PARAM_ANI_LISTEN_PERIOD,
.ani_ofdm_level = WMI_PDEV_PARAM_ANI_OFDM_LEVEL,
.ani_cck_level = WMI_PDEV_PARAM_ANI_CCK_LEVEL,
.dyntxchain = WMI_PDEV_PARAM_DYNTXCHAIN,
.proxy_sta = WMI_PDEV_PARAM_PROXY_STA,
.idle_ps_config = WMI_PDEV_PARAM_IDLE_PS_CONFIG,
.power_gating_sleep = WMI_PDEV_PARAM_POWER_GATING_SLEEP,
.fast_channel_reset = WMI_PDEV_PARAM_UNSUPPORTED,
.burst_dur = WMI_PDEV_PARAM_UNSUPPORTED,
.burst_enable = WMI_PDEV_PARAM_UNSUPPORTED,
};
static struct wmi_pdev_param_map wmi_10x_pdev_param_map = {
.tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
.rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
.txpower_limit2g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
.txpower_limit5g = WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
.txpower_scale = WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
.beacon_gen_mode = WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
.beacon_tx_mode = WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
.resmgr_offchan_mode = WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
.protection_mode = WMI_10X_PDEV_PARAM_PROTECTION_MODE,
.dynamic_bw = WMI_10X_PDEV_PARAM_DYNAMIC_BW,
.non_agg_sw_retry_th = WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
.agg_sw_retry_th = WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
.sta_kickout_th = WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
.ac_aggrsize_scaling = WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
.ltr_enable = WMI_10X_PDEV_PARAM_LTR_ENABLE,
.ltr_ac_latency_be = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
.ltr_ac_latency_bk = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
.ltr_ac_latency_vi = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
.ltr_ac_latency_vo = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
.ltr_ac_latency_timeout = WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
.ltr_sleep_override = WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
.ltr_rx_override = WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
.ltr_tx_activity_timeout = WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
.l1ss_enable = WMI_10X_PDEV_PARAM_L1SS_ENABLE,
.dsleep_enable = WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
.pcielp_txbuf_flush = WMI_PDEV_PARAM_UNSUPPORTED,
.pcielp_txbuf_watermark = WMI_PDEV_PARAM_UNSUPPORTED,
.pcielp_txbuf_tmo_en = WMI_PDEV_PARAM_UNSUPPORTED,
.pcielp_txbuf_tmo_value = WMI_PDEV_PARAM_UNSUPPORTED,
.pdev_stats_update_period = WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
.vdev_stats_update_period = WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
.peer_stats_update_period = WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
.bcnflt_stats_update_period =
WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
.pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
.arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,
.arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
.dcs = WMI_10X_PDEV_PARAM_DCS,
.ani_enable = WMI_10X_PDEV_PARAM_ANI_ENABLE,
.ani_poll_period = WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
.ani_listen_period = WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
.ani_ofdm_level = WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
.ani_cck_level = WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
.dyntxchain = WMI_10X_PDEV_PARAM_DYNTXCHAIN,
.proxy_sta = WMI_PDEV_PARAM_UNSUPPORTED,
.idle_ps_config = WMI_PDEV_PARAM_UNSUPPORTED,
.power_gating_sleep = WMI_PDEV_PARAM_UNSUPPORTED,
.fast_channel_reset = WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
.burst_dur = WMI_10X_PDEV_PARAM_BURST_DUR,
.burst_enable = WMI_10X_PDEV_PARAM_BURST_ENABLE,
};
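/*
 * Illustrative sketch, not part of the driver: the cmd/vdev/pdev maps
 * above let common code translate abstract indices into whatever value
 * the running firmware expects, with an UNSUPPORTED sentinel marking
 * gaps.  All names below (demo_cmd_map, DEMO_CMD_UNSUPPORTED, demo_send)
 * are hypothetical and only show the lookup-and-reject pattern as a
 * standalone user-space program.
 */
#include <stdio.h>

#define DEMO_CMD_UNSUPPORTED 0

struct demo_cmd_map {
	unsigned int start_scan_cmdid;
	unsigned int gtk_offload_cmdid;
};

/* main-style firmware: both commands exist (values are made up) */
static const struct demo_cmd_map demo_main_map = {
	.start_scan_cmdid = 0x0003,
	.gtk_offload_cmdid = 0x0050,
};

/* 10.X-style firmware: one command has no counterpart */
static const struct demo_cmd_map demo_10x_map = {
	.start_scan_cmdid = 0x9000,
	.gtk_offload_cmdid = DEMO_CMD_UNSUPPORTED,
};

/* reject unsupported commands before they ever reach the target */
static int demo_send(unsigned int cmd_id)
{
	if (cmd_id == DEMO_CMD_UNSUPPORTED) {
		fprintf(stderr, "command not supported by firmware\n");
		return -1;
	}
	printf("sending cmd 0x%04x\n", cmd_id);
	return 0;
}

int main(void)
{
	/* the map would be chosen once, at attach time */
	const struct demo_cmd_map *map = &demo_main_map;

	demo_send(map->gtk_offload_cmdid); /* sent on main firmware */

	map = &demo_10x_map;
	demo_send(map->gtk_offload_cmdid); /* rejected on 10.X firmware */
	return 0;
}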
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
int ret;
......@@ -64,7 +529,7 @@ static void ath10k_wmi_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
}
static int ath10k_wmi_cmd_send_nowait(struct ath10k *ar, struct sk_buff *skb,
enum wmi_cmd_id cmd_id)
u32 cmd_id)
{
struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
struct wmi_cmd_hdr *cmd_hdr;
......@@ -144,9 +609,17 @@ static void ath10k_wmi_op_ep_tx_credits(struct ath10k *ar)
}
static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
enum wmi_cmd_id cmd_id)
u32 cmd_id)
{
int ret = -EINVAL;
int ret = -EOPNOTSUPP;
might_sleep();
if (cmd_id == WMI_CMD_UNSUPPORTED) {
ath10k_warn("wmi command %d is not supported by firmware\n",
cmd_id);
return ret;
}
wait_event_timeout(ar->wmi.tx_credits_wq, ({
/* try to send pending beacons first. they take priority */
......@@ -162,6 +635,57 @@ static int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb,
return ret;
}
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
{
int ret = 0;
struct wmi_mgmt_tx_cmd *cmd;
struct ieee80211_hdr *hdr;
struct sk_buff *wmi_skb;
struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
int len;
u16 fc;
hdr = (struct ieee80211_hdr *)skb->data;
fc = le16_to_cpu(hdr->frame_control);
if (WARN_ON_ONCE(!ieee80211_is_mgmt(hdr->frame_control)))
return -EINVAL;
len = sizeof(cmd->hdr) + skb->len;
len = round_up(len, 4);
wmi_skb = ath10k_wmi_alloc_skb(len);
if (!wmi_skb)
return -ENOMEM;
cmd = (struct wmi_mgmt_tx_cmd *)wmi_skb->data;
cmd->hdr.vdev_id = __cpu_to_le32(ATH10K_SKB_CB(skb)->vdev_id);
cmd->hdr.tx_rate = 0;
cmd->hdr.tx_power = 0;
cmd->hdr.buf_len = __cpu_to_le32((u32)(skb->len));
memcpy(cmd->hdr.peer_macaddr.addr, ieee80211_get_DA(hdr), ETH_ALEN);
memcpy(cmd->buf, skb->data, skb->len);
ath10k_dbg(ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
/* Send the management frame buffer to the target */
ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
if (ret) {
dev_kfree_skb_any(skb);
return ret;
}
/* TODO: report tx status to mac80211 - temporary just ACK */
info->flags |= IEEE80211_TX_STAT_ACK;
ieee80211_tx_status_irqsafe(ar->hw, skb);
return ret;
}
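/*
 * Illustrative sketch, not part of the driver: the management-tx path
 * above sizes its WMI buffer as the command header plus the frame,
 * rounded up to a 4-byte boundary.  demo_round_up4() below is a
 * hypothetical stand-in for the kernel's round_up() and the lengths are
 * made up; it only demonstrates the padding arithmetic.
 */
#include <stdio.h>

/* round len up to the next multiple of 4 (works because 4 is a power of two) */
static unsigned int demo_round_up4(unsigned int len)
{
	return (len + 3u) & ~3u;
}

int main(void)
{
	unsigned int hdr_len = 24;   /* assumed command header size */
	unsigned int frame_len = 93; /* example management frame length */
	unsigned int len = demo_round_up4(hdr_len + frame_len);

	printf("unpadded %u bytes, padded %u bytes\n", hdr_len + frame_len, len);
	return 0;
}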
static int ath10k_wmi_event_scan(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_scan_event *event = (struct wmi_scan_event *)skb->data;
......@@ -964,6 +1488,55 @@ static void ath10k_wmi_event_vdev_install_key_complete(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_INSTALL_KEY_COMPLETE_EVENTID\n");
}
static void ath10k_wmi_event_inst_rssi_stats(struct ath10k *ar,
struct sk_buff *skb)
{
ath10k_dbg(ATH10K_DBG_WMI, "WMI_INST_RSSI_STATS_EVENTID\n");
}
static void ath10k_wmi_event_vdev_standby_req(struct ath10k *ar,
struct sk_buff *skb)
{
ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_STANDBY_REQ_EVENTID\n");
}
static void ath10k_wmi_event_vdev_resume_req(struct ath10k *ar,
struct sk_buff *skb)
{
ath10k_dbg(ATH10K_DBG_WMI, "WMI_VDEV_RESUME_REQ_EVENTID\n");
}
static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
u32 num_units, u32 unit_len)
{
dma_addr_t paddr;
u32 pool_size;
int idx = ar->wmi.num_mem_chunks;
pool_size = num_units * round_up(unit_len, 4);
if (!pool_size)
return -EINVAL;
ar->wmi.mem_chunks[idx].vaddr = dma_alloc_coherent(ar->dev,
pool_size,
&paddr,
GFP_ATOMIC);
if (!ar->wmi.mem_chunks[idx].vaddr) {
ath10k_warn("failed to allocate memory chunk\n");
return -ENOMEM;
}
memset(ar->wmi.mem_chunks[idx].vaddr, 0, pool_size);
ar->wmi.mem_chunks[idx].paddr = paddr;
ar->wmi.mem_chunks[idx].len = pool_size;
ar->wmi.mem_chunks[idx].req_id = req_id;
ar->wmi.num_mem_chunks++;
return 0;
}
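/*
 * Illustrative sketch, not part of the driver: each host memory chunk is
 * sized as num_units * round_up(unit_len, 4) and recorded in a small
 * table so it can be reported to firmware in the init command and freed
 * at detach.  The names below and the use of calloc() in place of
 * dma_alloc_coherent() are assumptions made only to show the bookkeeping.
 */
#include <stdio.h>
#include <stdlib.h>

#define DEMO_MAX_MEM_REQS 16

struct demo_mem_chunk {
	void *vaddr;
	size_t len;
	unsigned int req_id;
};

static struct demo_mem_chunk demo_chunks[DEMO_MAX_MEM_REQS];
static int demo_num_chunks;

static int demo_alloc_host_mem(unsigned int req_id, size_t num_units,
			       size_t unit_len)
{
	size_t pool_size = num_units * ((unit_len + 3) & ~(size_t)3);
	int idx = demo_num_chunks;

	if (!pool_size || idx >= DEMO_MAX_MEM_REQS)
		return -1;

	demo_chunks[idx].vaddr = calloc(1, pool_size); /* zeroed, like the driver */
	if (!demo_chunks[idx].vaddr)
		return -1;

	demo_chunks[idx].len = pool_size;
	demo_chunks[idx].req_id = req_id;
	demo_num_chunks++;
	return 0;
}

int main(void)
{
	/* 129 units of 6 bytes -> unit_len rounds to 8, pool is 1032 bytes */
	demo_alloc_host_mem(1, 129, 6);
	printf("chunks %d, first len %zu\n", demo_num_chunks, demo_chunks[0].len);

	while (demo_num_chunks--)
		free(demo_chunks[demo_num_chunks].vaddr);
	return 0;
}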
static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
struct sk_buff *skb)
{
......@@ -988,7 +1561,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
if (ar->fw_version_build > 636)
/* only manually set fw features when not using FW IE format */
if (ar->fw_api == 1 && ar->fw_version_build > 636)
set_bit(ATH10K_FW_FEATURE_EXT_WMI_MGMT_RX, ar->fw_features);
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
......@@ -1035,6 +1609,108 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
complete(&ar->wmi.service_ready);
}
static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
struct sk_buff *skb)
{
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
int ret;
struct wmi_service_ready_event_10x *ev = (void *)skb->data;
if (skb->len < sizeof(*ev)) {
ath10k_warn("Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
skb->len, sizeof(*ev));
return;
}
ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
ar->fw_version_major =
(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
ath10k_warn("hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
}
ar->ath_common.regulatory.current_rd =
__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
ath10k_debug_read_service_map(ar, ev->wmi_service_bitmap,
sizeof(ev->wmi_service_bitmap));
if (strlen(ar->hw->wiphy->fw_version) == 0) {
snprintf(ar->hw->wiphy->fw_version,
sizeof(ar->hw->wiphy->fw_version),
"%u.%u",
ar->fw_version_major,
ar->fw_version_minor);
}
num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
ath10k_warn("requested memory chunks number (%d) exceeds the limit\n",
num_mem_reqs);
return;
}
if (!num_mem_reqs)
goto exit;
ath10k_dbg(ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
num_mem_reqs);
for (i = 0; i < num_mem_reqs; ++i) {
req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
/* number of units to allocate is number of
* peers, 1 extra for self peer on target */
/* this needs to be tied, host and target
* can get out of sync */
num_units = TARGET_10X_NUM_PEERS + 1;
else if (num_unit_info & NUM_UNITS_IS_NUM_VDEVS)
num_units = TARGET_10X_NUM_VDEVS + 1;
ath10k_dbg(ATH10K_DBG_WMI,
"wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
req_id,
__le32_to_cpu(ev->mem_reqs[i].num_units),
num_unit_info,
unit_size,
num_units);
ret = ath10k_wmi_alloc_host_mem(ar, req_id, num_units,
unit_size);
if (ret)
return;
}
exit:
ath10k_dbg(ATH10K_DBG_WMI,
"wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->abi_version),
__le32_to_cpu(ev->phy_capability),
__le32_to_cpu(ev->ht_cap_info),
__le32_to_cpu(ev->vht_cap_info),
__le32_to_cpu(ev->vht_supp_mcs),
__le32_to_cpu(ev->sys_cap_info),
__le32_to_cpu(ev->num_mem_reqs),
__le32_to_cpu(ev->num_rf_chains));
complete(&ar->wmi.service_ready);
}
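/*
 * Illustrative sketch, not part of the driver: the 10.X service-ready
 * handler above derives the firmware version string by splitting the
 * 32-bit sw_version into a major number (top byte) and a minor number
 * (low 24 bits).  The sw_version value below is made up purely to show
 * the arithmetic.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t sw_version = 0x01001348; /* hypothetical value */
	uint32_t major = (sw_version & 0xff000000) >> 24;
	uint32_t minor = sw_version & 0x00ffffff;

	printf("fw %u.%u\n", (unsigned)major, (unsigned)minor); /* "fw 1.4936" */
	return 0;
}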
static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
......@@ -1055,7 +1731,7 @@ static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_event_id id;
......@@ -1174,9 +1850,138 @@ static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
dev_kfree_skb(skb);
}
static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_10x_event_id id;
u16 len;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
if (skb_pull(skb, sizeof(struct wmi_cmd_hdr)) == NULL)
return;
len = skb->len;
trace_ath10k_wmi_event(id, skb->data, skb->len);
switch (id) {
case WMI_10X_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
/* mgmt_rx() owns the skb now! */
return;
case WMI_10X_SCAN_EVENTID:
ath10k_wmi_event_scan(ar, skb);
break;
case WMI_10X_CHAN_INFO_EVENTID:
ath10k_wmi_event_chan_info(ar, skb);
break;
case WMI_10X_ECHO_EVENTID:
ath10k_wmi_event_echo(ar, skb);
break;
case WMI_10X_DEBUG_MESG_EVENTID:
ath10k_wmi_event_debug_mesg(ar, skb);
break;
case WMI_10X_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
case WMI_10X_VDEV_START_RESP_EVENTID:
ath10k_wmi_event_vdev_start_resp(ar, skb);
break;
case WMI_10X_VDEV_STOPPED_EVENTID:
ath10k_wmi_event_vdev_stopped(ar, skb);
break;
case WMI_10X_PEER_STA_KICKOUT_EVENTID:
ath10k_wmi_event_peer_sta_kickout(ar, skb);
break;
case WMI_10X_HOST_SWBA_EVENTID:
ath10k_wmi_event_host_swba(ar, skb);
break;
case WMI_10X_TBTTOFFSET_UPDATE_EVENTID:
ath10k_wmi_event_tbttoffset_update(ar, skb);
break;
case WMI_10X_PHYERR_EVENTID:
ath10k_wmi_event_phyerr(ar, skb);
break;
case WMI_10X_ROAM_EVENTID:
ath10k_wmi_event_roam(ar, skb);
break;
case WMI_10X_PROFILE_MATCH:
ath10k_wmi_event_profile_match(ar, skb);
break;
case WMI_10X_DEBUG_PRINT_EVENTID:
ath10k_wmi_event_debug_print(ar, skb);
break;
case WMI_10X_PDEV_QVIT_EVENTID:
ath10k_wmi_event_pdev_qvit(ar, skb);
break;
case WMI_10X_WLAN_PROFILE_DATA_EVENTID:
ath10k_wmi_event_wlan_profile_data(ar, skb);
break;
case WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID:
ath10k_wmi_event_rtt_measurement_report(ar, skb);
break;
case WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID:
ath10k_wmi_event_tsf_measurement_report(ar, skb);
break;
case WMI_10X_RTT_ERROR_REPORT_EVENTID:
ath10k_wmi_event_rtt_error_report(ar, skb);
break;
case WMI_10X_WOW_WAKEUP_HOST_EVENTID:
ath10k_wmi_event_wow_wakeup_host(ar, skb);
break;
case WMI_10X_DCS_INTERFERENCE_EVENTID:
ath10k_wmi_event_dcs_interference(ar, skb);
break;
case WMI_10X_PDEV_TPC_CONFIG_EVENTID:
ath10k_wmi_event_pdev_tpc_config(ar, skb);
break;
case WMI_10X_INST_RSSI_STATS_EVENTID:
ath10k_wmi_event_inst_rssi_stats(ar, skb);
break;
case WMI_10X_VDEV_STANDBY_REQ_EVENTID:
ath10k_wmi_event_vdev_standby_req(ar, skb);
break;
case WMI_10X_VDEV_RESUME_REQ_EVENTID:
ath10k_wmi_event_vdev_resume_req(ar, skb);
break;
case WMI_10X_SERVICE_READY_EVENTID:
ath10k_wmi_10x_service_ready_event_rx(ar, skb);
break;
case WMI_10X_READY_EVENTID:
ath10k_wmi_ready_event_rx(ar, skb);
break;
default:
ath10k_warn("Unknown eventid: %d\n", id);
break;
}
dev_kfree_skb(skb);
}
static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
{
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
ath10k_wmi_10x_process_rx(ar, skb);
else
ath10k_wmi_main_process_rx(ar, skb);
}
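/*
 * Illustrative sketch, not part of the driver: both RX paths above pull
 * the event id out of the first header word with a mask-and-shift macro
 * before switching on it.  The mask, shift and header word used here are
 * assumptions chosen only to demonstrate the extraction, not the real
 * WMI header layout.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_CMD_ID_MASK 0x00ffffff /* assumed field mask */
#define DEMO_CMD_ID_LSB  0          /* assumed field position */

/* extract a bit field described by FIELD_MASK/FIELD_LSB from a word */
#define DEMO_MS(word, field) (((word) & field##_MASK) >> field##_LSB)

int main(void)
{
	uint32_t hdr_word = 0x01009001; /* hypothetical header word */
	uint32_t id = DEMO_MS(hdr_word, DEMO_CMD_ID);

	printf("event id 0x%04x\n", (unsigned)id); /* prints 0x9001 */
	return 0;
}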
/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
ar->wmi.cmd = &wmi_10x_cmd_map;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
} else {
ar->wmi.cmd = &wmi_cmd_map;
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
}
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
......@@ -1186,6 +1991,17 @@ int ath10k_wmi_attach(struct ath10k *ar)
void ath10k_wmi_detach(struct ath10k *ar)
{
int i;
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_free_coherent(ar->dev,
ar->wmi.mem_chunks[i].len,
ar->wmi.mem_chunks[i].vaddr,
ar->wmi.mem_chunks[i].paddr);
}
ar->wmi.num_mem_chunks = 0;
}
int ath10k_wmi_connect_htc_service(struct ath10k *ar)
......@@ -1237,7 +2053,8 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
"wmi pdev regdomain rd %x rd2g %x rd5g %x ctl2g %x ctl5g %x\n",
rd, rd2g, rd5g, ctl2g, ctl5g);
return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_REGDOMAIN_CMDID);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_regdomain_cmdid);
}
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
......@@ -1267,7 +2084,8 @@ int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
"wmi set channel mode %d freq %d\n",
arg->mode, arg->freq);
return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_CHANNEL_CMDID);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_channel_cmdid);
}
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
......@@ -1282,7 +2100,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar)
cmd = (struct wmi_pdev_suspend_cmd *)skb->data;
cmd->suspend_opt = WMI_PDEV_SUSPEND;
return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SUSPEND_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_suspend_cmdid);
}
int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
......@@ -1293,15 +2111,19 @@ int ath10k_wmi_pdev_resume_target(struct ath10k *ar)
if (skb == NULL)
return -ENOMEM;
return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_RESUME_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_resume_cmdid);
}
int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
u32 value)
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
{
struct wmi_pdev_set_param_cmd *cmd;
struct sk_buff *skb;
if (id == WMI_PDEV_PARAM_UNSUPPORTED) {
ath10k_warn("pdev param %d not supported by firmware\n", id);
return -EOPNOTSUPP;
}
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
......@@ -1312,15 +2134,16 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set param %d value %d\n",
id, value);
return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_PARAM_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
int ath10k_wmi_cmd_init(struct ath10k *ar)
static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
u32 val;
u32 len, val;
int i;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
......@@ -1373,22 +2196,157 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
config.num_msdu_desc = __cpu_to_le32(TARGET_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_MAX_FRAG_ENTRIES);
buf = ath10k_wmi_alloc_skb(sizeof(*cmd));
len = sizeof(*cmd) +
(sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
buf = ath10k_wmi_alloc_skb(len);
if (!buf)
return -ENOMEM;
cmd = (struct wmi_init_cmd *)buf->data;
if (ar->wmi.num_mem_chunks == 0) {
cmd->num_host_mem_chunks = 0;
goto out;
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
__cpu_to_le32(ar->wmi.num_mem_chunks));
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
cmd->host_mem_chunks[i].ptr =
__cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
cmd->host_mem_chunks[i].size =
__cpu_to_le32(ar->wmi.mem_chunks[i].len);
cmd->host_mem_chunks[i].req_id =
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%x\n",
i,
cmd->host_mem_chunks[i].size,
cmd->host_mem_chunks[i].ptr);
}
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
ath10k_dbg(ATH10K_DBG_WMI, "wmi init\n");
return ath10k_wmi_cmd_send(ar, buf, WMI_INIT_CMDID);
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
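/*
 * Illustrative sketch, not part of the driver: the init command above is
 * laid out as a fixed part followed by one host memory chunk descriptor
 * per chunk handed to firmware, so the buffer length is computed before
 * the descriptors are filled in.  The structures below are hypothetical
 * stand-ins that only show the sizing and fill pattern.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct demo_chunk_desc {
	uint32_t ptr;
	uint32_t size;
	uint32_t req_id;
};

struct demo_init_cmd {
	uint32_t num_host_mem_chunks;
	struct demo_chunk_desc host_mem_chunks[]; /* flexible trailing array */
};

int main(void)
{
	unsigned int i, num_chunks = 2;
	size_t len = sizeof(struct demo_init_cmd) +
		     num_chunks * sizeof(struct demo_chunk_desc);
	struct demo_init_cmd *cmd = calloc(1, len);

	if (!cmd)
		return 1;

	cmd->num_host_mem_chunks = num_chunks;
	for (i = 0; i < num_chunks; i++) {
		cmd->host_mem_chunks[i].ptr = 0x1000 * (i + 1); /* fake addresses */
		cmd->host_mem_chunks[i].size = 1032;
		cmd->host_mem_chunks[i].req_id = i + 1;
	}

	printf("init cmd is %zu bytes for %u chunks\n", len, num_chunks);
	free(cmd);
	return 0;
}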
static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
{
struct wmi_init_cmd_10x *cmd;
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
u32 len, val;
int i;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
config.num_peer_keys = __cpu_to_le32(TARGET_10X_NUM_PEER_KEYS);
config.num_tids = __cpu_to_le32(TARGET_10X_NUM_TIDS);
config.ast_skid_limit = __cpu_to_le32(TARGET_10X_AST_SKID_LIMIT);
config.tx_chain_mask = __cpu_to_le32(TARGET_10X_TX_CHAIN_MASK);
config.rx_chain_mask = __cpu_to_le32(TARGET_10X_RX_CHAIN_MASK);
config.rx_timeout_pri_vo = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_vi = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_be = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_LO_PRI);
config.rx_timeout_pri_bk = __cpu_to_le32(TARGET_10X_RX_TIMEOUT_HI_PRI);
config.rx_decap_mode = __cpu_to_le32(TARGET_10X_RX_DECAP_MODE);
config.scan_max_pending_reqs =
__cpu_to_le32(TARGET_10X_SCAN_MAX_PENDING_REQS);
config.bmiss_offload_max_vdev =
__cpu_to_le32(TARGET_10X_BMISS_OFFLOAD_MAX_VDEV);
config.roam_offload_max_vdev =
__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_VDEV);
config.roam_offload_max_ap_profiles =
__cpu_to_le32(TARGET_10X_ROAM_OFFLOAD_MAX_AP_PROFILES);
config.num_mcast_groups = __cpu_to_le32(TARGET_10X_NUM_MCAST_GROUPS);
config.num_mcast_table_elems =
__cpu_to_le32(TARGET_10X_NUM_MCAST_TABLE_ELEMS);
config.mcast2ucast_mode = __cpu_to_le32(TARGET_10X_MCAST2UCAST_MODE);
config.tx_dbg_log_size = __cpu_to_le32(TARGET_10X_TX_DBG_LOG_SIZE);
config.num_wds_entries = __cpu_to_le32(TARGET_10X_NUM_WDS_ENTRIES);
config.dma_burst_size = __cpu_to_le32(TARGET_10X_DMA_BURST_SIZE);
config.mac_aggr_delim = __cpu_to_le32(TARGET_10X_MAC_AGGR_DELIM);
val = TARGET_10X_RX_SKIP_DEFRAG_TIMEOUT_DUP_DETECTION_CHECK;
config.rx_skip_defrag_timeout_dup_detection_check = __cpu_to_le32(val);
config.vow_config = __cpu_to_le32(TARGET_10X_VOW_CONFIG);
config.num_msdu_desc = __cpu_to_le32(TARGET_10X_NUM_MSDU_DESC);
config.max_frag_entries = __cpu_to_le32(TARGET_10X_MAX_FRAG_ENTRIES);
len = sizeof(*cmd) +
(sizeof(struct host_memory_chunk) * ar->wmi.num_mem_chunks);
buf = ath10k_wmi_alloc_skb(len);
if (!buf)
return -ENOMEM;
cmd = (struct wmi_init_cmd_10x *)buf->data;
if (ar->wmi.num_mem_chunks == 0) {
cmd->num_host_mem_chunks = 0;
goto out;
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
__cpu_to_le32(ar->wmi.num_mem_chunks));
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
cmd->host_mem_chunks[i].ptr =
__cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
cmd->host_mem_chunks[i].size =
__cpu_to_le32(ar->wmi.mem_chunks[i].len);
cmd->host_mem_chunks[i].req_id =
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%x\n",
i,
cmd->host_mem_chunks[i].size,
cmd->host_mem_chunks[i].ptr);
}
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
ath10k_dbg(ATH10K_DBG_WMI, "wmi init 10x\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
}
static int ath10k_wmi_start_scan_calc_len(const struct wmi_start_scan_arg *arg)
int ath10k_wmi_cmd_init(struct ath10k *ar)
{
int ret;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
ret = ath10k_wmi_10x_cmd_init(ar);
else
ret = ath10k_wmi_main_cmd_init(ar);
return ret;
}
static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
const struct wmi_start_scan_arg *arg)
{
int len;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
len = sizeof(struct wmi_start_scan_cmd_10x);
else
len = sizeof(struct wmi_start_scan_cmd);
if (arg->ie_len) {
......@@ -1449,7 +2407,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
int len = 0;
int i;
len = ath10k_wmi_start_scan_calc_len(arg);
len = ath10k_wmi_start_scan_calc_len(ar, arg);
if (len < 0)
return len; /* len contains error code here */
......@@ -1481,7 +2439,14 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
/* TLV list starts after fields included in the struct */
off = sizeof(*cmd);
/* There is just one field that differs between the two start_scan
 * structures - burst_duration, which we are not using anyway, so
 * there is no point in splitting here; just shift the buffer to fit
 * the given FW. */
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
off = sizeof(struct wmi_start_scan_cmd_10x);
else
off = sizeof(struct wmi_start_scan_cmd);
if (arg->n_channels) {
channels = (void *)skb->data + off;
......@@ -1543,7 +2508,7 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
}
ath10k_dbg(ATH10K_DBG_WMI, "wmi start scan\n");
return ath10k_wmi_cmd_send(ar, skb, WMI_START_SCAN_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->start_scan_cmdid);
}
void ath10k_wmi_start_scan_init(struct ath10k *ar,
......@@ -1559,7 +2524,7 @@ void ath10k_wmi_start_scan_init(struct ath10k *ar,
arg->repeat_probe_time = 0;
arg->probe_spacing_time = 0;
arg->idle_time = 0;
arg->max_scan_time = 5000;
arg->max_scan_time = 20000;
arg->probe_delay = 5;
arg->notify_scan_events = WMI_SCAN_EVENT_STARTED
| WMI_SCAN_EVENT_COMPLETED
......@@ -1603,7 +2568,7 @@ int ath10k_wmi_stop_scan(struct ath10k *ar, const struct wmi_stop_scan_arg *arg)
ath10k_dbg(ATH10K_DBG_WMI,
"wmi stop scan reqid %d req_type %d vdev/scan_id %d\n",
arg->req_id, arg->req_type, arg->u.scan_id);
return ath10k_wmi_cmd_send(ar, skb, WMI_STOP_SCAN_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->stop_scan_cmdid);
}
int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
......@@ -1628,7 +2593,7 @@ int ath10k_wmi_vdev_create(struct ath10k *ar, u32 vdev_id,
"WMI vdev create: id %d type %d subtype %d macaddr %pM\n",
vdev_id, type, subtype, macaddr);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_CREATE_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_create_cmdid);
}
int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
......@@ -1646,20 +2611,20 @@ int ath10k_wmi_vdev_delete(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI,
"WMI vdev delete id %d\n", vdev_id);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DELETE_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_delete_cmdid);
}
static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg,
enum wmi_cmd_id cmd_id)
u32 cmd_id)
{
struct wmi_vdev_start_request_cmd *cmd;
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
if (cmd_id != WMI_VDEV_START_REQUEST_CMDID &&
cmd_id != WMI_VDEV_RESTART_REQUEST_CMDID)
if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
return -EINVAL;
if (WARN_ON(arg->ssid && arg->ssid_len == 0))
return -EINVAL;
......@@ -1668,9 +2633,9 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
if (WARN_ON(arg->ssid_len > sizeof(cmd->ssid.ssid)))
return -EINVAL;
if (cmd_id == WMI_VDEV_START_REQUEST_CMDID)
if (cmd_id == ar->wmi.cmd->vdev_start_request_cmdid)
cmdname = "start";
else if (cmd_id == WMI_VDEV_RESTART_REQUEST_CMDID)
else if (cmd_id == ar->wmi.cmd->vdev_restart_request_cmdid)
cmdname = "restart";
else
return -EINVAL; /* should not happen, we already check cmd_id */
......@@ -1721,15 +2686,17 @@ static int ath10k_wmi_vdev_start_restart(struct ath10k *ar,
int ath10k_wmi_vdev_start(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
return ath10k_wmi_vdev_start_restart(ar, arg,
WMI_VDEV_START_REQUEST_CMDID);
u32 cmd_id = ar->wmi.cmd->vdev_start_request_cmdid;
return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}
int ath10k_wmi_vdev_restart(struct ath10k *ar,
const struct wmi_vdev_start_request_arg *arg)
{
return ath10k_wmi_vdev_start_restart(ar, arg,
WMI_VDEV_RESTART_REQUEST_CMDID);
u32 cmd_id = ar->wmi.cmd->vdev_restart_request_cmdid;
return ath10k_wmi_vdev_start_restart(ar, arg, cmd_id);
}
int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
......@@ -1746,7 +2713,7 @@ int ath10k_wmi_vdev_stop(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI, "wmi vdev stop id 0x%x\n", vdev_id);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_STOP_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_stop_cmdid);
}
int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
......@@ -1767,7 +2734,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid, const u8 *bssid)
"wmi mgmt vdev up id 0x%x assoc id %d bssid %pM\n",
vdev_id, aid, bssid);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_UP_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_up_cmdid);
}
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
......@@ -1785,15 +2752,22 @@ int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id)
ath10k_dbg(ATH10K_DBG_WMI,
"wmi mgmt vdev down id 0x%x\n", vdev_id);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_DOWN_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_down_cmdid);
}
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
enum wmi_vdev_param param_id, u32 param_value)
u32 param_id, u32 param_value)
{
struct wmi_vdev_set_param_cmd *cmd;
struct sk_buff *skb;
if (param_id == WMI_VDEV_PARAM_UNSUPPORTED) {
ath10k_dbg(ATH10K_DBG_WMI,
"vdev param %d not supported by firmware\n",
param_id);
return -EOPNOTSUPP;
}
skb = ath10k_wmi_alloc_skb(sizeof(*cmd));
if (!skb)
return -ENOMEM;
......@@ -1807,7 +2781,7 @@ int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
"wmi vdev id 0x%x set param %d value %d\n",
vdev_id, param_id, param_value);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_SET_PARAM_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->vdev_set_param_cmdid);
}
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
......@@ -1842,7 +2816,8 @@ int ath10k_wmi_vdev_install_key(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi vdev install key idx %d cipher %d len %d\n",
arg->key_idx, arg->key_cipher, arg->key_len);
return ath10k_wmi_cmd_send(ar, skb, WMI_VDEV_INSTALL_KEY_CMDID);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->vdev_install_key_cmdid);
}
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
......@@ -1862,7 +2837,7 @@ int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer create vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_CREATE_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_create_cmdid);
}
int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
......@@ -1882,7 +2857,7 @@ int ath10k_wmi_peer_delete(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer delete vdev_id %d peer_addr %pM\n",
vdev_id, peer_addr);
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_DELETE_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_delete_cmdid);
}
int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
......@@ -1903,7 +2878,7 @@ int ath10k_wmi_peer_flush(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer flush vdev_id %d peer_addr %pM tids %08x\n",
vdev_id, peer_addr, tid_bitmap);
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_FLUSH_TIDS_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_flush_tids_cmdid);
}
int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
......@@ -1927,7 +2902,7 @@ int ath10k_wmi_peer_set_param(struct ath10k *ar, u32 vdev_id,
"wmi vdev %d peer 0x%pM set param %d value %d\n",
vdev_id, peer_addr, param_id, param_value);
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_SET_PARAM_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_set_param_cmdid);
}
int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
......@@ -1948,7 +2923,8 @@ int ath10k_wmi_set_psmode(struct ath10k *ar, u32 vdev_id,
"wmi set powersave id 0x%x mode %d\n",
vdev_id, psmode);
return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_MODE_CMDID);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->sta_powersave_mode_cmdid);
}
int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
......@@ -1970,7 +2946,8 @@ int ath10k_wmi_set_sta_ps_param(struct ath10k *ar, u32 vdev_id,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi sta ps param vdev_id 0x%x param %d value %d\n",
vdev_id, param_id, value);
return ath10k_wmi_cmd_send(ar, skb, WMI_STA_POWERSAVE_PARAM_CMDID);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->sta_powersave_param_cmdid);
}
int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
......@@ -1996,7 +2973,8 @@ int ath10k_wmi_set_ap_ps_param(struct ath10k *ar, u32 vdev_id, const u8 *mac,
"wmi ap ps param vdev_id 0x%X param %d value %d mac_addr %pM\n",
vdev_id, param_id, value, mac);
return ath10k_wmi_cmd_send(ar, skb, WMI_AP_PS_PEER_PARAM_CMDID);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->ap_ps_peer_param_cmdid);
}
int ath10k_wmi_scan_chan_list(struct ath10k *ar,
......@@ -2049,7 +3027,7 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
ci->flags |= __cpu_to_le32(flags);
}
return ath10k_wmi_cmd_send(ar, skb, WMI_SCAN_CHAN_LIST_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
}
int ath10k_wmi_peer_assoc(struct ath10k *ar,
......@@ -2108,7 +3086,7 @@ int ath10k_wmi_peer_assoc(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI,
"wmi peer assoc vdev %d addr %pM\n",
arg->vdev_id, arg->addr);
return ath10k_wmi_cmd_send(ar, skb, WMI_PEER_ASSOC_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->peer_assoc_cmdid);
}
int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
......@@ -2128,7 +3106,7 @@ int ath10k_wmi_beacon_send_nowait(struct ath10k *ar,
cmd->hdr.bcn_len = __cpu_to_le32(arg->bcn_len);
memcpy(cmd->bcn, arg->bcn, arg->bcn_len);
return ath10k_wmi_cmd_send_nowait(ar, skb, WMI_BCN_TX_CMDID);
return ath10k_wmi_cmd_send_nowait(ar, skb, ar->wmi.cmd->bcn_tx_cmdid);
}
static void ath10k_wmi_pdev_set_wmm_param(struct wmi_wmm_params *params,
......@@ -2159,7 +3137,8 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
ath10k_wmi_pdev_set_wmm_param(&cmd->ac_vo, &arg->ac_vo);
ath10k_dbg(ATH10K_DBG_WMI, "wmi pdev set wmm params\n");
return ath10k_wmi_cmd_send(ar, skb, WMI_PDEV_SET_WMM_PARAMS_CMDID);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_wmm_params_cmdid);
}
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
......@@ -2175,7 +3154,7 @@ int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id)
cmd->stats_id = __cpu_to_le32(stats_id);
ath10k_dbg(ATH10K_DBG_WMI, "wmi request stats %d\n", (int)stats_id);
return ath10k_wmi_cmd_send(ar, skb, WMI_REQUEST_STATS_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->request_stats_cmdid);
}
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
......@@ -2194,5 +3173,5 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
ath10k_dbg(ATH10K_DBG_WMI, "wmi force fw hang %d delay %d\n",
type, delay_ms);
return ath10k_wmi_cmd_send(ar, skb, WMI_FORCE_FW_HANG_CMDID);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->force_fw_hang_cmdid);
}
......@@ -208,6 +208,118 @@ struct wmi_mac_addr {
(c_macaddr)[5] = (((pwmi_mac_addr)->word1) >> 8) & 0xff; \
} while (0)
struct wmi_cmd_map {
u32 init_cmdid;
u32 start_scan_cmdid;
u32 stop_scan_cmdid;
u32 scan_chan_list_cmdid;
u32 scan_sch_prio_tbl_cmdid;
u32 pdev_set_regdomain_cmdid;
u32 pdev_set_channel_cmdid;
u32 pdev_set_param_cmdid;
u32 pdev_pktlog_enable_cmdid;
u32 pdev_pktlog_disable_cmdid;
u32 pdev_set_wmm_params_cmdid;
u32 pdev_set_ht_cap_ie_cmdid;
u32 pdev_set_vht_cap_ie_cmdid;
u32 pdev_set_dscp_tid_map_cmdid;
u32 pdev_set_quiet_mode_cmdid;
u32 pdev_green_ap_ps_enable_cmdid;
u32 pdev_get_tpc_config_cmdid;
u32 pdev_set_base_macaddr_cmdid;
u32 vdev_create_cmdid;
u32 vdev_delete_cmdid;
u32 vdev_start_request_cmdid;
u32 vdev_restart_request_cmdid;
u32 vdev_up_cmdid;
u32 vdev_stop_cmdid;
u32 vdev_down_cmdid;
u32 vdev_set_param_cmdid;
u32 vdev_install_key_cmdid;
u32 peer_create_cmdid;
u32 peer_delete_cmdid;
u32 peer_flush_tids_cmdid;
u32 peer_set_param_cmdid;
u32 peer_assoc_cmdid;
u32 peer_add_wds_entry_cmdid;
u32 peer_remove_wds_entry_cmdid;
u32 peer_mcast_group_cmdid;
u32 bcn_tx_cmdid;
u32 pdev_send_bcn_cmdid;
u32 bcn_tmpl_cmdid;
u32 bcn_filter_rx_cmdid;
u32 prb_req_filter_rx_cmdid;
u32 mgmt_tx_cmdid;
u32 prb_tmpl_cmdid;
u32 addba_clear_resp_cmdid;
u32 addba_send_cmdid;
u32 addba_status_cmdid;
u32 delba_send_cmdid;
u32 addba_set_resp_cmdid;
u32 send_singleamsdu_cmdid;
u32 sta_powersave_mode_cmdid;
u32 sta_powersave_param_cmdid;
u32 sta_mimo_ps_mode_cmdid;
u32 pdev_dfs_enable_cmdid;
u32 pdev_dfs_disable_cmdid;
u32 roam_scan_mode;
u32 roam_scan_rssi_threshold;
u32 roam_scan_period;
u32 roam_scan_rssi_change_threshold;
u32 roam_ap_profile;
u32 ofl_scan_add_ap_profile;
u32 ofl_scan_remove_ap_profile;
u32 ofl_scan_period;
u32 p2p_dev_set_device_info;
u32 p2p_dev_set_discoverability;
u32 p2p_go_set_beacon_ie;
u32 p2p_go_set_probe_resp_ie;
u32 p2p_set_vendor_ie_data_cmdid;
u32 ap_ps_peer_param_cmdid;
u32 ap_ps_peer_uapsd_coex_cmdid;
u32 peer_rate_retry_sched_cmdid;
u32 wlan_profile_trigger_cmdid;
u32 wlan_profile_set_hist_intvl_cmdid;
u32 wlan_profile_get_profile_data_cmdid;
u32 wlan_profile_enable_profile_id_cmdid;
u32 wlan_profile_list_profile_id_cmdid;
u32 pdev_suspend_cmdid;
u32 pdev_resume_cmdid;
u32 add_bcn_filter_cmdid;
u32 rmv_bcn_filter_cmdid;
u32 wow_add_wake_pattern_cmdid;
u32 wow_del_wake_pattern_cmdid;
u32 wow_enable_disable_wake_event_cmdid;
u32 wow_enable_cmdid;
u32 wow_hostwakeup_from_sleep_cmdid;
u32 rtt_measreq_cmdid;
u32 rtt_tsf_cmdid;
u32 vdev_spectral_scan_configure_cmdid;
u32 vdev_spectral_scan_enable_cmdid;
u32 request_stats_cmdid;
u32 set_arp_ns_offload_cmdid;
u32 network_list_offload_config_cmdid;
u32 gtk_offload_cmdid;
u32 csa_offload_enable_cmdid;
u32 csa_offload_chanswitch_cmdid;
u32 chatter_set_mode_cmdid;
u32 peer_tid_addba_cmdid;
u32 peer_tid_delba_cmdid;
u32 sta_dtim_ps_method_cmdid;
u32 sta_uapsd_auto_trig_cmdid;
u32 sta_keepalive_cmd;
u32 echo_cmdid;
u32 pdev_utf_cmdid;
u32 dbglog_cfg_cmdid;
u32 pdev_qvit_cmdid;
u32 pdev_ftm_intg_cmdid;
u32 vdev_set_keepalive_cmdid;
u32 vdev_get_keepalive_cmdid;
u32 force_fw_hang_cmdid;
u32 gpio_config_cmdid;
u32 gpio_output_cmdid;
};
/*
* wmi command groups.
*/
......@@ -247,7 +359,9 @@ enum wmi_cmd_group {
#define WMI_CMD_GRP(grp_id) (((grp_id) << 12) | 0x1)
#define WMI_EVT_GRP_START_ID(grp_id) (((grp_id) << 12) | 0x1)
/* Command IDs and command events. */
#define WMI_CMD_UNSUPPORTED 0
/* Command IDs and command events for MAIN FW. */
enum wmi_cmd_id {
WMI_INIT_CMDID = 0x1,
......@@ -488,6 +602,217 @@ enum wmi_event_id {
WMI_GPIO_INPUT_EVENTID = WMI_EVT_GRP_START_ID(WMI_GRP_GPIO),
};
/* Command IDs and command events for 10.X firmware */
enum wmi_10x_cmd_id {
WMI_10X_START_CMDID = 0x9000,
WMI_10X_END_CMDID = 0x9FFF,
/* initialize the wlan subsystem */
WMI_10X_INIT_CMDID,
/* Scan specific commands */
WMI_10X_START_SCAN_CMDID = WMI_10X_START_CMDID,
WMI_10X_STOP_SCAN_CMDID,
WMI_10X_SCAN_CHAN_LIST_CMDID,
WMI_10X_ECHO_CMDID,
/* PDEV(physical device) specific commands */
WMI_10X_PDEV_SET_REGDOMAIN_CMDID,
WMI_10X_PDEV_SET_CHANNEL_CMDID,
WMI_10X_PDEV_SET_PARAM_CMDID,
WMI_10X_PDEV_PKTLOG_ENABLE_CMDID,
WMI_10X_PDEV_PKTLOG_DISABLE_CMDID,
WMI_10X_PDEV_SET_WMM_PARAMS_CMDID,
WMI_10X_PDEV_SET_HT_CAP_IE_CMDID,
WMI_10X_PDEV_SET_VHT_CAP_IE_CMDID,
WMI_10X_PDEV_SET_BASE_MACADDR_CMDID,
WMI_10X_PDEV_SET_DSCP_TID_MAP_CMDID,
WMI_10X_PDEV_SET_QUIET_MODE_CMDID,
WMI_10X_PDEV_GREEN_AP_PS_ENABLE_CMDID,
WMI_10X_PDEV_GET_TPC_CONFIG_CMDID,
/* VDEV(virtual device) specific commands */
WMI_10X_VDEV_CREATE_CMDID,
WMI_10X_VDEV_DELETE_CMDID,
WMI_10X_VDEV_START_REQUEST_CMDID,
WMI_10X_VDEV_RESTART_REQUEST_CMDID,
WMI_10X_VDEV_UP_CMDID,
WMI_10X_VDEV_STOP_CMDID,
WMI_10X_VDEV_DOWN_CMDID,
WMI_10X_VDEV_STANDBY_RESPONSE_CMDID,
WMI_10X_VDEV_RESUME_RESPONSE_CMDID,
WMI_10X_VDEV_SET_PARAM_CMDID,
WMI_10X_VDEV_INSTALL_KEY_CMDID,
/* peer specific commands */
WMI_10X_PEER_CREATE_CMDID,
WMI_10X_PEER_DELETE_CMDID,
WMI_10X_PEER_FLUSH_TIDS_CMDID,
WMI_10X_PEER_SET_PARAM_CMDID,
WMI_10X_PEER_ASSOC_CMDID,
WMI_10X_PEER_ADD_WDS_ENTRY_CMDID,
WMI_10X_PEER_REMOVE_WDS_ENTRY_CMDID,
WMI_10X_PEER_MCAST_GROUP_CMDID,
/* beacon/management specific commands */
WMI_10X_BCN_TX_CMDID,
WMI_10X_BCN_PRB_TMPL_CMDID,
WMI_10X_BCN_FILTER_RX_CMDID,
WMI_10X_PRB_REQ_FILTER_RX_CMDID,
WMI_10X_MGMT_TX_CMDID,
/* commands to directly control ba negotiation from host. */
WMI_10X_ADDBA_CLEAR_RESP_CMDID,
WMI_10X_ADDBA_SEND_CMDID,
WMI_10X_ADDBA_STATUS_CMDID,
WMI_10X_DELBA_SEND_CMDID,
WMI_10X_ADDBA_SET_RESP_CMDID,
WMI_10X_SEND_SINGLEAMSDU_CMDID,
/* Station power save specific config */
WMI_10X_STA_POWERSAVE_MODE_CMDID,
WMI_10X_STA_POWERSAVE_PARAM_CMDID,
WMI_10X_STA_MIMO_PS_MODE_CMDID,
/* set debug log config */
WMI_10X_DBGLOG_CFG_CMDID,
/* DFS-specific commands */
WMI_10X_PDEV_DFS_ENABLE_CMDID,
WMI_10X_PDEV_DFS_DISABLE_CMDID,
/* QVIT specific command id */
WMI_10X_PDEV_QVIT_CMDID,
/* Offload Scan and Roaming related commands */
WMI_10X_ROAM_SCAN_MODE,
WMI_10X_ROAM_SCAN_RSSI_THRESHOLD,
WMI_10X_ROAM_SCAN_PERIOD,
WMI_10X_ROAM_SCAN_RSSI_CHANGE_THRESHOLD,
WMI_10X_ROAM_AP_PROFILE,
WMI_10X_OFL_SCAN_ADD_AP_PROFILE,
WMI_10X_OFL_SCAN_REMOVE_AP_PROFILE,
WMI_10X_OFL_SCAN_PERIOD,
/* P2P specific commands */
WMI_10X_P2P_DEV_SET_DEVICE_INFO,
WMI_10X_P2P_DEV_SET_DISCOVERABILITY,
WMI_10X_P2P_GO_SET_BEACON_IE,
WMI_10X_P2P_GO_SET_PROBE_RESP_IE,
/* AP power save specific config */
WMI_10X_AP_PS_PEER_PARAM_CMDID,
WMI_10X_AP_PS_PEER_UAPSD_COEX_CMDID,
/* Rate-control specific commands */
WMI_10X_PEER_RATE_RETRY_SCHED_CMDID,
/* WLAN Profiling commands. */
WMI_10X_WLAN_PROFILE_TRIGGER_CMDID,
WMI_10X_WLAN_PROFILE_SET_HIST_INTVL_CMDID,
WMI_10X_WLAN_PROFILE_GET_PROFILE_DATA_CMDID,
WMI_10X_WLAN_PROFILE_ENABLE_PROFILE_ID_CMDID,
WMI_10X_WLAN_PROFILE_LIST_PROFILE_ID_CMDID,
/* Suspend resume command Ids */
WMI_10X_PDEV_SUSPEND_CMDID,
WMI_10X_PDEV_RESUME_CMDID,
/* Beacon filter commands */
WMI_10X_ADD_BCN_FILTER_CMDID,
WMI_10X_RMV_BCN_FILTER_CMDID,
/* WOW Specific WMI commands*/
WMI_10X_WOW_ADD_WAKE_PATTERN_CMDID,
WMI_10X_WOW_DEL_WAKE_PATTERN_CMDID,
WMI_10X_WOW_ENABLE_DISABLE_WAKE_EVENT_CMDID,
WMI_10X_WOW_ENABLE_CMDID,
WMI_10X_WOW_HOSTWAKEUP_FROM_SLEEP_CMDID,
/* RTT measurement related cmd */
WMI_10X_RTT_MEASREQ_CMDID,
WMI_10X_RTT_TSF_CMDID,
/* transmit beacon by value */
WMI_10X_PDEV_SEND_BCN_CMDID,
/* F/W stats */
WMI_10X_VDEV_SPECTRAL_SCAN_CONFIGURE_CMDID,
WMI_10X_VDEV_SPECTRAL_SCAN_ENABLE_CMDID,
WMI_10X_REQUEST_STATS_CMDID,
/* GPIO Configuration */
WMI_10X_GPIO_CONFIG_CMDID,
WMI_10X_GPIO_OUTPUT_CMDID,
WMI_10X_PDEV_UTF_CMDID = WMI_10X_END_CMDID - 1,
};
enum wmi_10x_event_id {
WMI_10X_SERVICE_READY_EVENTID = 0x8000,
WMI_10X_READY_EVENTID,
WMI_10X_START_EVENTID = 0x9000,
WMI_10X_END_EVENTID = 0x9FFF,
/* Scan specific events */
WMI_10X_SCAN_EVENTID = WMI_10X_START_EVENTID,
WMI_10X_ECHO_EVENTID,
WMI_10X_DEBUG_MESG_EVENTID,
WMI_10X_UPDATE_STATS_EVENTID,
/* Instantaneous RSSI event */
WMI_10X_INST_RSSI_STATS_EVENTID,
/* VDEV specific events */
WMI_10X_VDEV_START_RESP_EVENTID,
WMI_10X_VDEV_STANDBY_REQ_EVENTID,
WMI_10X_VDEV_RESUME_REQ_EVENTID,
WMI_10X_VDEV_STOPPED_EVENTID,
/* peer specific events */
WMI_10X_PEER_STA_KICKOUT_EVENTID,
/* beacon/mgmt specific events */
WMI_10X_HOST_SWBA_EVENTID,
WMI_10X_TBTTOFFSET_UPDATE_EVENTID,
WMI_10X_MGMT_RX_EVENTID,
/* Channel stats event */
WMI_10X_CHAN_INFO_EVENTID,
/* PHY Error specific WMI event */
WMI_10X_PHYERR_EVENTID,
/* Roam event to trigger roaming on host */
WMI_10X_ROAM_EVENTID,
/* matching AP found from list of profiles */
WMI_10X_PROFILE_MATCH,
/* debug print message used for tracing FW code while debugging */
WMI_10X_DEBUG_PRINT_EVENTID,
/* QVIT specific event */
WMI_10X_PDEV_QVIT_EVENTID,
/* FW code profile data in response to profile request */
WMI_10X_WLAN_PROFILE_DATA_EVENTID,
/*RTT related event ID*/
WMI_10X_RTT_MEASUREMENT_REPORT_EVENTID,
WMI_10X_TSF_MEASUREMENT_REPORT_EVENTID,
WMI_10X_RTT_ERROR_REPORT_EVENTID,
WMI_10X_WOW_WAKEUP_HOST_EVENTID,
WMI_10X_DCS_INTERFERENCE_EVENTID,
/* TPC config for the current operating channel */
WMI_10X_PDEV_TPC_CONFIG_EVENTID,
WMI_10X_GPIO_INPUT_EVENTID,
WMI_10X_PDEV_UTF_EVENTID = WMI_10X_END_EVENTID-1,
};
enum wmi_phy_mode {
MODE_11A = 0, /* 11a Mode */
MODE_11G = 1, /* 11b/g Mode */
......@@ -805,6 +1130,46 @@ struct wmi_service_ready_event {
struct wlan_host_mem_req mem_reqs[1];
} __packed;
/* This is the definition from 10.X firmware branch */
struct wmi_service_ready_event_10x {
__le32 sw_version;
__le32 abi_version;
/* WMI_PHY_CAPABILITY */
__le32 phy_capability;
/* Maximum number of frag table entries that SW will populate, minus 1 */
__le32 max_frag_entry;
__le32 wmi_service_bitmap[WMI_SERVICE_BM_SIZE];
__le32 num_rf_chains;
/*
* The following field is only valid for service type
* WMI_SERVICE_11AC
*/
__le32 ht_cap_info; /* WMI HT Capability */
__le32 vht_cap_info; /* VHT capability info field of 802.11ac */
__le32 vht_supp_mcs; /* VHT Supported MCS Set field Rx/Tx same */
__le32 hw_min_tx_power;
__le32 hw_max_tx_power;
struct hal_reg_capabilities hal_reg_capabilities;
__le32 sys_cap_info;
__le32 min_pkt_size_enable; /* Enterprise mode short pkt enable */
/*
* request to host to allocate a chunk of memory and pass it down to FW
* via WMI_INIT. FW uses this as FW extension memory for saving its
* data structures. Only valid for low latency interfaces like PCIE
* where FW can access this memory directly (or) by DMA.
*/
__le32 num_mem_reqs;
struct wlan_host_mem_req mem_reqs[1];
} __packed;
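/*
 * Editor's note: a minimal, hypothetical sketch (not part of this patch) of
 * how a host driver could walk the variable-length mem_reqs[] trailer
 * announced via num_mem_reqs. ath10k_example_alloc_fw_mem_chunk() is an
 * assumed helper used purely for illustration.
 */
static int ath10k_example_handle_mem_reqs(struct ath10k *ar,
                                          struct wmi_service_ready_event_10x *ev)
{
        u32 num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
        u32 i;
        int ret;

        for (i = 0; i < num_mem_reqs; i++) {
                /* allocate DMA-able memory as requested by the firmware */
                ret = ath10k_example_alloc_fw_mem_chunk(ar, &ev->mem_reqs[i]); /* hypothetical */
                if (ret)
                        return ret;
        }

        return 0;
}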
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
#define WMI_UNIFIED_READY_TIMEOUT_HZ (5*HZ)
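/*
 * Editor's note: a hedged illustration of how these timeout defines are
 * typically consumed.  The ar->wmi.service_ready completion is assumed for
 * the sketch and is not declared in this header.
 */
static inline int ath10k_example_wait_service_ready(struct ath10k *ar)
{
        unsigned long time_left;

        time_left = wait_for_completion_timeout(&ar->wmi.service_ready, /* assumed */
                                                WMI_SERVICE_READY_TIMEOUT_HZ);
        if (!time_left)
                return -ETIMEDOUT;

        return 0;
}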
......@@ -1012,6 +1377,192 @@ struct wmi_resource_config {
__le32 max_frag_entries;
} __packed;
struct wmi_resource_config_10x {
/* number of virtual devices (VAPs) to support */
__le32 num_vdevs;
/* number of peer nodes to support */
__le32 num_peers;
/* number of keys per peer */
__le32 num_peer_keys;
/* total number of TX/RX data TIDs */
__le32 num_tids;
/*
* max skid for resolving hash collisions
*
* The address search table is sparse, so that if two MAC addresses
* result in the same hash value, the second of these conflicting
* entries can slide to the next index in the address search table,
* and use it, if it is unoccupied. This ast_skid_limit parameter
* specifies the upper bound on how many subsequent indices to search
* over to find an unoccupied space.
*/
__le32 ast_skid_limit;
/*
* the nominal chain mask for transmit
*
* The chain mask may be modified dynamically, e.g. to operate AP
* tx with a reduced number of chains if no clients are associated.
* This configuration parameter specifies the nominal chain-mask that
* should be used when not operating with a reduced set of tx chains.
*/
__le32 tx_chain_mask;
/*
* the nominal chain mask for receive
*
* The chain mask may be modified dynamically, e.g. for a client
* to use a reduced number of chains for receive if the traffic to
* the client is low enough that it doesn't require downlink MIMO
* or antenna diversity.
* This configuration parameter specifies the nominal chain-mask that
* should be used when not operating with a reduced set of rx chains.
*/
__le32 rx_chain_mask;
/*
* what rx reorder timeout (ms) to use for the AC
*
* Each WMM access class (voice, video, best-effort, background) will
* have its own timeout value to dictate how long to wait for missing
* rx MPDUs to arrive before flushing subsequent MPDUs that have
* already been received.
* This parameter specifies the timeout in milliseconds for each
* class.
*/
__le32 rx_timeout_pri_vi;
__le32 rx_timeout_pri_vo;
__le32 rx_timeout_pri_be;
__le32 rx_timeout_pri_bk;
/*
* what mode the rx should decap packets to
*
* MAC can decap to RAW (no decap), native wifi or Ethernet types.
* This setting also determines the default TX behavior; however, TX
* behavior can be modified on a per-VAP basis during VAP init
*/
__le32 rx_decap_mode;
/* maximum number of scan requests that can be queued */
__le32 scan_max_pending_reqs;
/* maximum VDEV that could use BMISS offload */
__le32 bmiss_offload_max_vdev;
/* maximum VDEV that could use offload roaming */
__le32 roam_offload_max_vdev;
/* maximum AP profiles that would push to offload roaming */
__le32 roam_offload_max_ap_profiles;
/*
* how many groups to use for mcast->ucast conversion
*
* The target's WAL maintains a table to hold information regarding
* which peers belong to a given multicast group, so that if
* multicast->unicast conversion is enabled, the target can convert
* multicast tx frames to a series of unicast tx frames, to each
* peer within the multicast group.
* This num_mcast_groups configuration parameter tells the target how
* many multicast groups to provide storage for within its multicast
* group membership table.
*/
__le32 num_mcast_groups;
/*
* size to alloc for the mcast membership table
*
* This num_mcast_table_elems configuration parameter tells the
* target how many peer elements it needs to provide storage for in
* its multicast group membership table.
* These multicast group membership table elements are shared by the
* multicast groups stored within the table.
*/
__le32 num_mcast_table_elems;
/*
* whether/how to do multicast->unicast conversion
*
* This configuration parameter specifies whether the target should
* perform multicast --> unicast conversion on transmit, and if so,
* what to do if it finds no entries in its multicast group
* membership table for the multicast IP address in the tx frame.
* Configuration value:
* 0 -> Do not perform multicast to unicast conversion.
* 1 -> Convert multicast frames to unicast, if the IP multicast
* address from the tx frame is found in the multicast group
* membership table. If the IP multicast address is not found,
* drop the frame.
* 2 -> Convert multicast frames to unicast, if the IP multicast
* address from the tx frame is found in the multicast group
* membership table. If the IP multicast address is not found,
* transmit the frame as multicast.
*/
__le32 mcast2ucast_mode;
/*
* how much memory to allocate for a tx PPDU dbg log
*
* This parameter controls how much memory the target will allocate
* to store a log of tx PPDU meta-information (how large the PPDU
* was, when it was sent, whether it was successful, etc.)
*/
__le32 tx_dbg_log_size;
/* how many AST entries to be allocated for WDS */
__le32 num_wds_entries;
/*
* MAC DMA burst size. For a PCI target the limit can be
* 0 - default, 1 - 256B
*/
__le32 dma_burst_size;
/*
* Fixed delimiters to be inserted after every MPDU to
* account for interface latency to avoid underrun.
*/
__le32 mac_aggr_delim;
/*
* determine whether target is responsible for detecting duplicate
* non-aggregate MPDU and timing out stale fragments.
*
* A-MPDU reordering is always performed on the target.
*
* 0: target responsible for frag timeout and dup checking
* 1: host responsible for frag timeout and dup checking
*/
__le32 rx_skip_defrag_timeout_dup_detection_check;
/*
* Configuration for VoW :
* No of Video Nodes to be supported
* and Max no of descriptors for each Video link (node).
*/
__le32 vow_config;
/* Number of msdu descriptors target should use */
__le32 num_msdu_desc;
/*
* Max. number of Tx fragments per MSDU
* This parameter controls the max number of Tx fragments per MSDU.
* This is sent by the target as part of the WMI_SERVICE_READY event
* and is overridden by the OS shim as required.
*/
__le32 max_frag_entries;
} __packed;
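/*
 * Editor's note: a hedged sketch of filling a few of the fields above before
 * sending WMI_INIT.  All numeric values are placeholders chosen only to show
 * the byte-order handling and the mcast2ucast_mode encoding (0: off,
 * 1: convert-or-drop, 2: convert-or-send-as-mcast).
 */
static void ath10k_example_fill_resource_config_10x(struct wmi_resource_config_10x *cfg)
{
        memset(cfg, 0, sizeof(*cfg));

        cfg->num_vdevs = __cpu_to_le32(8);              /* placeholder */
        cfg->num_peers = __cpu_to_le32(16);             /* placeholder */
        cfg->rx_decap_mode = __cpu_to_le32(1);          /* placeholder decap mode */
        cfg->mcast2ucast_mode = __cpu_to_le32(0);       /* no mcast->ucast conversion */
        cfg->num_msdu_desc = __cpu_to_le32(1024);       /* placeholder */
}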
#define NUM_UNITS_IS_NUM_VDEVS 0x1
#define NUM_UNITS_IS_NUM_PEERS 0x2
/* structure describing host memory chunk. */
struct host_memory_chunk {
/* id of the request that is passed up in service ready */
......@@ -1033,6 +1584,18 @@ struct wmi_init_cmd {
struct host_memory_chunk host_mem_chunks[1];
} __packed;
/* _10x structure is from 10.X FW API */
struct wmi_init_cmd_10x {
struct wmi_resource_config_10x resource_config;
__le32 num_host_mem_chunks;
/*
* variable number of host memory chunks.
* This should be the last element in the structure
*/
struct host_memory_chunk host_mem_chunks[1];
} __packed;
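/*
 * Editor's note: host_mem_chunks[] is a variable-length trailer declared with
 * a single element, so the WMI_INIT buffer length is normally derived from
 * the chunk count.  Illustrative helper only, not part of this patch:
 */
static inline size_t ath10k_example_init_cmd_10x_len(u32 num_chunks)
{
        size_t len = sizeof(struct wmi_init_cmd_10x);

        /* the first chunk is already accounted for by host_mem_chunks[1] */
        if (num_chunks)
                len += (num_chunks - 1) * sizeof(struct host_memory_chunk);

        return len;
}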
/* TLV for channel list */
struct wmi_chan_list {
__le32 tag; /* WMI_CHAN_LIST_TAG */
......@@ -1152,6 +1715,88 @@ struct wmi_start_scan_cmd {
*/
} __packed;
/* This is the definition from 10.X firmware branch */
struct wmi_start_scan_cmd_10x {
/* Scan ID */
__le32 scan_id;
/* Scan requestor ID */
__le32 scan_req_id;
/* VDEV id (interface) that is requesting scan */
__le32 vdev_id;
/* Scan Priority, input to scan scheduler */
__le32 scan_priority;
/* Scan events subscription */
__le32 notify_scan_events;
/* dwell time in msec on active channels */
__le32 dwell_time_active;
/* dwell time in msec on passive channels */
__le32 dwell_time_passive;
/*
* min time in msec on the BSS channel, only valid if at least one
* VDEV is active
*/
__le32 min_rest_time;
/*
* max rest time in msec on the BSS channel, only valid if at least
* one VDEV is active
*/
/*
* The scanner will rest on the bss channel for at least min_rest_time.
* After min_rest_time the scanner will start checking for tx/rx
* activity on all VDEVs. If there is no activity the scanner will
* switch to off channel. If there is activity the scanner will leave
* the radio on the bss channel until max_rest_time expires. At
* max_rest_time the scanner will switch to off channel irrespective of
* activity. Activity is determined by the idle_time parameter.
*/
__le32 max_rest_time;
/*
* time before sending next set of probe requests.
* The scanner keeps repeating probe requests transmission with
* period specified by repeat_probe_time.
* The number of probe requests specified depends on the ssid_list
* and bssid_list
*/
__le32 repeat_probe_time;
/* time in msec between 2 consecutive probe requests within a set. */
__le32 probe_spacing_time;
/*
* data inactivity time in msec on bss channel that will be used by
* scanner for measuring the inactivity.
*/
__le32 idle_time;
/* maximum time in msec allowed for scan */
__le32 max_scan_time;
/*
* delay in msec before sending first probe request after switching
* to a channel
*/
__le32 probe_delay;
/* Scan control flags */
__le32 scan_ctrl_flags;
/*
* TLV (tag length value) parameters follow the scan_cmd structure.
* TLVs can contain a channel list, bssid list, ssid list and
* IEs. The TLV tags are defined above.
*/
} __packed;
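/*
 * Editor's note: arbitrary placeholder values demonstrating the rest-time
 * contract described above: the scanner stays on the BSS channel for at
 * least min_rest_time and leaves no later than max_rest_time once the VDEVs
 * go idle (idle_time).  Sketch only, not part of this patch.
 */
static void ath10k_example_fill_scan_timing_10x(struct wmi_start_scan_cmd_10x *cmd)
{
        cmd->dwell_time_active  = __cpu_to_le32(50);    /* msec, placeholder */
        cmd->dwell_time_passive = __cpu_to_le32(150);   /* msec, placeholder */
        cmd->min_rest_time      = __cpu_to_le32(50);    /* msec, placeholder */
        cmd->max_rest_time      = __cpu_to_le32(500);   /* msec, placeholder */
        cmd->idle_time          = __cpu_to_le32(25);    /* msec, placeholder */
        cmd->probe_delay        = __cpu_to_le32(5);     /* msec, placeholder */
}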
struct wmi_ssid_arg {
int len;
const u8 *ssid;
......@@ -1509,6 +2154,60 @@ struct wmi_csa_event {
#define VDEV_DEFAULT_STATS_UPDATE_PERIOD 500
#define PEER_DEFAULT_STATS_UPDATE_PERIOD 500
struct wmi_pdev_param_map {
u32 tx_chain_mask;
u32 rx_chain_mask;
u32 txpower_limit2g;
u32 txpower_limit5g;
u32 txpower_scale;
u32 beacon_gen_mode;
u32 beacon_tx_mode;
u32 resmgr_offchan_mode;
u32 protection_mode;
u32 dynamic_bw;
u32 non_agg_sw_retry_th;
u32 agg_sw_retry_th;
u32 sta_kickout_th;
u32 ac_aggrsize_scaling;
u32 ltr_enable;
u32 ltr_ac_latency_be;
u32 ltr_ac_latency_bk;
u32 ltr_ac_latency_vi;
u32 ltr_ac_latency_vo;
u32 ltr_ac_latency_timeout;
u32 ltr_sleep_override;
u32 ltr_rx_override;
u32 ltr_tx_activity_timeout;
u32 l1ss_enable;
u32 dsleep_enable;
u32 pcielp_txbuf_flush;
u32 pcielp_txbuf_watermark;
u32 pcielp_txbuf_tmo_en;
u32 pcielp_txbuf_tmo_value;
u32 pdev_stats_update_period;
u32 vdev_stats_update_period;
u32 peer_stats_update_period;
u32 bcnflt_stats_update_period;
u32 pmf_qos;
u32 arp_ac_override;
u32 arpdhcp_ac_override;
u32 dcs;
u32 ani_enable;
u32 ani_poll_period;
u32 ani_listen_period;
u32 ani_ofdm_level;
u32 ani_cck_level;
u32 dyntxchain;
u32 proxy_sta;
u32 idle_ps_config;
u32 power_gating_sleep;
u32 fast_channel_reset;
u32 burst_dur;
u32 burst_enable;
};
#define WMI_PDEV_PARAM_UNSUPPORTED 0
enum wmi_pdev_param {
/* TX chain mask */
WMI_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
......@@ -1608,6 +2307,97 @@ enum wmi_pdev_param {
WMI_PDEV_PARAM_POWER_GATING_SLEEP,
};
enum wmi_10x_pdev_param {
/* TX chain mask */
WMI_10X_PDEV_PARAM_TX_CHAIN_MASK = 0x1,
/* RX chain mask */
WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
/* TX power limit for 2G Radio */
WMI_10X_PDEV_PARAM_TXPOWER_LIMIT2G,
/* TX power limit for 5G Radio */
WMI_10X_PDEV_PARAM_TXPOWER_LIMIT5G,
/* TX power scale */
WMI_10X_PDEV_PARAM_TXPOWER_SCALE,
/* Beacon generation mode. 0: host, 1: target */
WMI_10X_PDEV_PARAM_BEACON_GEN_MODE,
/* Beacon transmission mode. 0: staggered, 1: bursted */
WMI_10X_PDEV_PARAM_BEACON_TX_MODE,
/*
* Resource manager off chan mode.
* 0: turn off offchan mode, 1: turn on offchan mode
*/
WMI_10X_PDEV_PARAM_RESMGR_OFFCHAN_MODE,
/*
* Protection mode:
* 0: no protection 1:use CTS-to-self 2: use RTS/CTS
*/
WMI_10X_PDEV_PARAM_PROTECTION_MODE,
/* Dynamic bandwidth 0: disable 1: enable */
WMI_10X_PDEV_PARAM_DYNAMIC_BW,
/* Non-aggregate / 11g sw retry threshold. 0 - disable */
WMI_10X_PDEV_PARAM_NON_AGG_SW_RETRY_TH,
/* aggregate sw retry threshold. 0 - disable */
WMI_10X_PDEV_PARAM_AGG_SW_RETRY_TH,
/* Station kickout threshold (number of consecutive failures). 0 - disable */
WMI_10X_PDEV_PARAM_STA_KICKOUT_TH,
/* Aggregate size scaling configuration per AC */
WMI_10X_PDEV_PARAM_AC_AGGRSIZE_SCALING,
/* LTR enable */
WMI_10X_PDEV_PARAM_LTR_ENABLE,
/* LTR latency for BE, in us */
WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BE,
/* LTR latency for BK, in us */
WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_BK,
/* LTR latency for VI, in us */
WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VI,
/* LTR latency for VO, in us */
WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_VO,
/* LTR AC latency timeout, in ms */
WMI_10X_PDEV_PARAM_LTR_AC_LATENCY_TIMEOUT,
/* LTR platform latency override, in us */
WMI_10X_PDEV_PARAM_LTR_SLEEP_OVERRIDE,
/* LTR-RX override, in us */
WMI_10X_PDEV_PARAM_LTR_RX_OVERRIDE,
/* Tx activity timeout for LTR, in us */
WMI_10X_PDEV_PARAM_LTR_TX_ACTIVITY_TIMEOUT,
/* L1SS state machine enable */
WMI_10X_PDEV_PARAM_L1SS_ENABLE,
/* Deep sleep state machine enable */
WMI_10X_PDEV_PARAM_DSLEEP_ENABLE,
/* pdev level stats update period in ms */
WMI_10X_PDEV_PARAM_PDEV_STATS_UPDATE_PERIOD,
/* vdev level stats update period in ms */
WMI_10X_PDEV_PARAM_VDEV_STATS_UPDATE_PERIOD,
/* peer level stats update period in ms */
WMI_10X_PDEV_PARAM_PEER_STATS_UPDATE_PERIOD,
/* beacon filter status update period */
WMI_10X_PDEV_PARAM_BCNFLT_STATS_UPDATE_PERIOD,
/* QOS Mgmt frame protection MFP/PMF 0: disable, 1: enable */
WMI_10X_PDEV_PARAM_PMF_QOS,
/* Access category on which ARP and DHCP frames are sent */
WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
/* DCS configuration */
WMI_10X_PDEV_PARAM_DCS,
/* Enable/Disable ANI on target */
WMI_10X_PDEV_PARAM_ANI_ENABLE,
/* configure the ANI polling period */
WMI_10X_PDEV_PARAM_ANI_POLL_PERIOD,
/* configure the ANI listening period */
WMI_10X_PDEV_PARAM_ANI_LISTEN_PERIOD,
/* configure OFDM immunity level */
WMI_10X_PDEV_PARAM_ANI_OFDM_LEVEL,
/* configure CCK immunity level */
WMI_10X_PDEV_PARAM_ANI_CCK_LEVEL,
/* Enable/Disable CDD for 1x1 STAs in rate control module */
WMI_10X_PDEV_PARAM_DYNTXCHAIN,
/* Enable/Disable Fast channel reset */
WMI_10X_PDEV_PARAM_FAST_CHANNEL_RESET,
/* Set Bursting DUR */
WMI_10X_PDEV_PARAM_BURST_DUR,
/* Set Bursting Enable */
WMI_10X_PDEV_PARAM_BURST_ENABLE,
};
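/*
 * Editor's note: a hedged example of how the enum above is expected to be
 * consumed through struct wmi_pdev_param_map.  Abstract parameters with no
 * 10.X counterpart (e.g. arp_ac_override) are left at
 * WMI_PDEV_PARAM_UNSUPPORTED so callers can detect and skip them.  The table
 * below is illustrative and intentionally incomplete.
 */
static const struct wmi_pdev_param_map wmi_10x_pdev_param_map_example = {
        .tx_chain_mask = WMI_10X_PDEV_PARAM_TX_CHAIN_MASK,
        .rx_chain_mask = WMI_10X_PDEV_PARAM_RX_CHAIN_MASK,
        .pmf_qos = WMI_10X_PDEV_PARAM_PMF_QOS,
        .arpdhcp_ac_override = WMI_10X_PDEV_PARAM_ARPDHCP_AC_OVERRIDE,
        .arp_ac_override = WMI_PDEV_PARAM_UNSUPPORTED,  /* not exposed by 10.X */
        /* remaining entries omitted from the sketch */
};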
struct wmi_pdev_set_param_cmd {
__le32 param_id;
__le32 param_value;
......@@ -2132,6 +2922,61 @@ enum wmi_rate_preamble {
/* Value to disable fixed rate setting */
#define WMI_FIXED_RATE_NONE (0xff)
struct wmi_vdev_param_map {
u32 rts_threshold;
u32 fragmentation_threshold;
u32 beacon_interval;
u32 listen_interval;
u32 multicast_rate;
u32 mgmt_tx_rate;
u32 slot_time;
u32 preamble;
u32 swba_time;
u32 wmi_vdev_stats_update_period;
u32 wmi_vdev_pwrsave_ageout_time;
u32 wmi_vdev_host_swba_interval;
u32 dtim_period;
u32 wmi_vdev_oc_scheduler_air_time_limit;
u32 wds;
u32 atim_window;
u32 bmiss_count_max;
u32 bmiss_first_bcnt;
u32 bmiss_final_bcnt;
u32 feature_wmm;
u32 chwidth;
u32 chextoffset;
u32 disable_htprotection;
u32 sta_quickkickout;
u32 mgmt_rate;
u32 protection_mode;
u32 fixed_rate;
u32 sgi;
u32 ldpc;
u32 tx_stbc;
u32 rx_stbc;
u32 intra_bss_fwd;
u32 def_keyid;
u32 nss;
u32 bcast_data_rate;
u32 mcast_data_rate;
u32 mcast_indicate;
u32 dhcp_indicate;
u32 unknown_dest_indicate;
u32 ap_keepalive_min_idle_inactive_time_secs;
u32 ap_keepalive_max_idle_inactive_time_secs;
u32 ap_keepalive_max_unresponsive_time_secs;
u32 ap_enable_nawds;
u32 mcast2ucast_set;
u32 enable_rtscts;
u32 txbf;
u32 packet_powersave;
u32 drop_unencry;
u32 tx_encap_type;
u32 ap_detect_out_of_sync_sleeping_sta_time_secs;
};
#define WMI_VDEV_PARAM_UNSUPPORTED 0
/* the definition of different VDEV parameters */
enum wmi_vdev_param {
/* RTS Threshold */
......@@ -2263,6 +3108,121 @@ enum wmi_vdev_param {
WMI_VDEV_PARAM_TX_ENCAP_TYPE,
};
/* the definition of different VDEV parameters */
enum wmi_10x_vdev_param {
/* RTS Threshold */
WMI_10X_VDEV_PARAM_RTS_THRESHOLD = 0x1,
/* Fragmentation threshold */
WMI_10X_VDEV_PARAM_FRAGMENTATION_THRESHOLD,
/* beacon interval in TUs */
WMI_10X_VDEV_PARAM_BEACON_INTERVAL,
/* Listen interval in TUs */
WMI_10X_VDEV_PARAM_LISTEN_INTERVAL,
/* multicast rate in Mbps */
WMI_10X_VDEV_PARAM_MULTICAST_RATE,
/* management frame rate in Mbps */
WMI_10X_VDEV_PARAM_MGMT_TX_RATE,
/* slot time (long vs short) */
WMI_10X_VDEV_PARAM_SLOT_TIME,
/* preamble (long vs short) */
WMI_10X_VDEV_PARAM_PREAMBLE,
/* SWBA time (time before tbtt in msec) */
WMI_10X_VDEV_PARAM_SWBA_TIME,
/* time period for updating VDEV stats */
WMI_10X_VDEV_STATS_UPDATE_PERIOD,
/* age out time in msec for frames queued for station in power save */
WMI_10X_VDEV_PWRSAVE_AGEOUT_TIME,
/*
* Host SWBA interval (time in msec before tbtt for SWBA event
* generation).
*/
WMI_10X_VDEV_HOST_SWBA_INTERVAL,
/* DTIM period (specified in units of num beacon intervals) */
WMI_10X_VDEV_PARAM_DTIM_PERIOD,
/*
* scheduler air time limit for this VDEV. used by off chan
* scheduler.
*/
WMI_10X_VDEV_OC_SCHEDULER_AIR_TIME_LIMIT,
/* enable/disable WDS for this VDEV */
WMI_10X_VDEV_PARAM_WDS,
/* ATIM Window */
WMI_10X_VDEV_PARAM_ATIM_WINDOW,
/* BMISS max */
WMI_10X_VDEV_PARAM_BMISS_COUNT_MAX,
/* WMM enable/disable */
WMI_10X_VDEV_PARAM_FEATURE_WMM,
/* Channel width */
WMI_10X_VDEV_PARAM_CHWIDTH,
/* Channel Offset */
WMI_10X_VDEV_PARAM_CHEXTOFFSET,
/* Disable HT Protection */
WMI_10X_VDEV_PARAM_DISABLE_HTPROTECTION,
/* Quick STA Kickout */
WMI_10X_VDEV_PARAM_STA_QUICKKICKOUT,
/* Rate to be used with Management frames */
WMI_10X_VDEV_PARAM_MGMT_RATE,
/* Protection Mode */
WMI_10X_VDEV_PARAM_PROTECTION_MODE,
/* Fixed rate setting */
WMI_10X_VDEV_PARAM_FIXED_RATE,
/* Short GI Enable/Disable */
WMI_10X_VDEV_PARAM_SGI,
/* Enable LDPC */
WMI_10X_VDEV_PARAM_LDPC,
/* Enable Tx STBC */
WMI_10X_VDEV_PARAM_TX_STBC,
/* Enable Rx STBC */
WMI_10X_VDEV_PARAM_RX_STBC,
/* Intra BSS forwarding */
WMI_10X_VDEV_PARAM_INTRA_BSS_FWD,
/* Setting Default xmit key for Vdev */
WMI_10X_VDEV_PARAM_DEF_KEYID,
/* NSS width */
WMI_10X_VDEV_PARAM_NSS,
/* Set the custom rate for the broadcast data frames */
WMI_10X_VDEV_PARAM_BCAST_DATA_RATE,
/* Set the custom rate (rate-code) for multicast data frames */
WMI_10X_VDEV_PARAM_MCAST_DATA_RATE,
/* Tx multicast packet indicate Enable/Disable */
WMI_10X_VDEV_PARAM_MCAST_INDICATE,
/* Tx DHCP packet indicate Enable/Disable */
WMI_10X_VDEV_PARAM_DHCP_INDICATE,
/* Enable host inspection of Tx unicast packet to unknown destination */
WMI_10X_VDEV_PARAM_UNKNOWN_DEST_INDICATE,
/* Minimum idle time (secs) before the AP considers the STA inactive */
WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MIN_IDLE_INACTIVE_TIME_SECS,
/*
* An associated STA is considered inactive when there is no recent
* TX/RX activity and no downlink frames are buffered for it. Once a
* STA exceeds the maximum idle inactive time, the AP will send an
* 802.11 data-null as a keep alive to verify the STA is still
* associated. If the STA does not ACK the data-null, or if the data-null
* is buffered and the STA does not retrieve it, the STA will be
* considered unresponsive
* (see WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS).
*/
WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_IDLE_INACTIVE_TIME_SECS,
/*
* An associated STA is considered unresponsive if there is no recent
* TX/RX activity and downlink frames are buffered for it. Once a STA
* exceeds the maximum unresponsive time, the AP will send a
* WMI_10X_STA_KICKOUT event to the host so the STA can be deleted. */
WMI_10X_VDEV_PARAM_AP_KEEPALIVE_MAX_UNRESPONSIVE_TIME_SECS,
/* Enable NAWDS : MCAST INSPECT Enable, NAWDS Flag set */
WMI_10X_VDEV_PARAM_AP_ENABLE_NAWDS,
WMI_10X_VDEV_PARAM_MCAST2UCAST_SET,
/* Enable/Disable RTS-CTS */
WMI_10X_VDEV_PARAM_ENABLE_RTSCTS,
WMI_10X_VDEV_PARAM_AP_DETECT_OUT_OF_SYNC_SLEEPING_STA_TIME_SECS,
};
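/*
 * Editor's note: a hedged sketch tying the three AP keepalive parameters
 * above together via ath10k_wmi_vdev_set_param() (declared later in this
 * header).  The ar->wmi.vdev_param map pointer and the numeric values are
 * assumptions made only for illustration.
 */
static int ath10k_example_set_ap_keepalive(struct ath10k *ar, u32 vdev_id)
{
        const struct wmi_vdev_param_map *map = ar->wmi.vdev_param; /* assumed */
        int ret;

        /* after 60s of inactivity the STA is treated as idle */
        ret = ath10k_wmi_vdev_set_param(ar, vdev_id,
                        map->ap_keepalive_min_idle_inactive_time_secs, 60);
        if (ret)
                return ret;

        /* after 120s idle the AP probes the STA with an 802.11 data-null */
        ret = ath10k_wmi_vdev_set_param(ar, vdev_id,
                        map->ap_keepalive_max_idle_inactive_time_secs, 120);
        if (ret)
                return ret;

        /* after 180s without a response the STA is reported for kickout */
        return ath10k_wmi_vdev_set_param(ar, vdev_id,
                        map->ap_keepalive_max_unresponsive_time_secs, 180);
}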
/* slot time long */
#define WMI_VDEV_SLOT_TIME_LONG 0x1
/* slot time short */
......@@ -3064,8 +4024,7 @@ int ath10k_wmi_pdev_suspend_target(struct ath10k *ar);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
u16 rd5g, u16 ctl2g, u16 ctl5g);
int ath10k_wmi_pdev_set_param(struct ath10k *ar, enum wmi_pdev_param id,
u32 value);
int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value);
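/*
 * Editor's note: with the per-firmware parameter maps, callers now pass an
 * already-translated u32 id instead of the abstract enum.  A hedged usage
 * sketch; ar->wmi.pdev_param is assumed to point at the map for the running
 * firmware and is not declared in this header.
 */
static inline int ath10k_example_enable_dcs(struct ath10k *ar)
{
        u32 param = ar->wmi.pdev_param->dcs;    /* assumed map pointer */

        if (param == WMI_PDEV_PARAM_UNSUPPORTED)
                return -EOPNOTSUPP;

        return ath10k_wmi_pdev_set_param(ar, param, 1);
}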
int ath10k_wmi_cmd_init(struct ath10k *ar);
int ath10k_wmi_start_scan(struct ath10k *ar, const struct wmi_start_scan_arg *);
void ath10k_wmi_start_scan_init(struct ath10k *ar, struct wmi_start_scan_arg *);
......@@ -3085,7 +4044,7 @@ int ath10k_wmi_vdev_up(struct ath10k *ar, u32 vdev_id, u32 aid,
const u8 *bssid);
int ath10k_wmi_vdev_down(struct ath10k *ar, u32 vdev_id);
int ath10k_wmi_vdev_set_param(struct ath10k *ar, u32 vdev_id,
enum wmi_vdev_param param_id, u32 param_value);
u32 param_id, u32 param_value);
int ath10k_wmi_vdev_install_key(struct ath10k *ar,
const struct wmi_vdev_install_key_arg *arg);
int ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
......@@ -3115,5 +4074,6 @@ int ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
int ath10k_wmi_request_stats(struct ath10k *ar, enum wmi_stats_id stats_id);
int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
#endif /* _WMI_H_ */
......@@ -60,7 +60,7 @@
/* disable credit flow control on a specific service */
#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00
#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00U
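/*
 * Editor's note: an illustrative helper (name assumed) showing how the
 * shift/mask pair above is combined when building the connect-service flags
 * word; the U suffix keeps the mask arithmetic unsigned.
 */
static inline u16 ath10k_htc_example_set_recv_alloc(u16 flags, u8 credits)
{
        flags &= ~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK;
        flags |= (credits << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT) &
                 HTC_CONN_FLGS_SET_RECV_ALLOC_MASK;

        return flags;
}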
/* connect response status codes */
#define HTC_SERVICE_SUCCESS 0
......