Commit 490f0dc4 authored by John W. Linville

Merge tag 'for-linville-20141024' of git://github.com/kvalo/ath

Conflicts:
	drivers/net/wireless/ath/wil6210/wil6210.h
parents 61ed53de 84cbf3a7
......@@ -443,12 +443,12 @@ int ath10k_ce_rx_post_buf(struct ath10k_ce_pipe *pipe, void *ctx, u32 paddr)
* Guts of ath10k_ce_completed_recv_next.
* The caller takes responsibility for any necessary locking.
*/
static int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp)
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp)
{
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
unsigned int nentries_mask = dest_ring->nentries_mask;
......@@ -576,11 +576,11 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
* Guts of ath10k_ce_completed_send_next.
* The caller takes responsibility for any necessary locking.
*/
static int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
......@@ -817,7 +817,10 @@ void ath10k_ce_enable_interrupts(struct ath10k *ar)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int ce_id;
for (ce_id = 0; ce_id < CE_COUNT; ce_id++)
/* Skip the last copy engine, CE7, the diagnostic window, as that
* uses polling and isn't initialized for interrupts.
*/
for (ce_id = 0; ce_id < CE_COUNT - 1; ce_id++)
ath10k_ce_per_engine_handler_adjust(&ar_pci->ce_states[ce_id]);
}
......@@ -1020,37 +1023,10 @@ ath10k_ce_alloc_dest_ring(struct ath10k *ar, unsigned int ce_id,
* initialized by software/firmware.
*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr,
void (*send_cb)(struct ath10k_ce_pipe *),
void (*recv_cb)(struct ath10k_ce_pipe *))
const struct ce_attr *attr)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
/*
* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
* additional TX locking checks.
*
* For lack of a better place, do the check here.
*/
BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
spin_lock_bh(&ar_pci->ce_lock);
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
if (attr->src_nentries)
ce_state->send_cb = send_cb;
if (attr->dest_nentries)
ce_state->recv_cb = recv_cb;
spin_unlock_bh(&ar_pci->ce_lock);
if (attr->src_nentries) {
ret = ath10k_ce_init_src_ring(ar, ce_id, attr);
if (ret) {
......@@ -1098,12 +1074,37 @@ void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id)
}
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr)
const struct ce_attr *attr,
void (*send_cb)(struct ath10k_ce_pipe *),
void (*recv_cb)(struct ath10k_ce_pipe *))
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
int ret;
/*
* Make sure there's enough CE ringbuffer entries for HTT TX to avoid
* additional TX locking checks.
*
* For lack of a better place, do the check here.
*/
BUILD_BUG_ON(2*TARGET_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
BUILD_BUG_ON(2*TARGET_10X_NUM_MSDU_DESC >
(CE_HTT_H2T_MSG_SRC_NENTRIES - 1));
ce_state->ar = ar;
ce_state->id = ce_id;
ce_state->ctrl_addr = ath10k_ce_base_address(ce_id);
ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max;
if (attr->src_nentries)
ce_state->send_cb = send_cb;
if (attr->dest_nentries)
ce_state->recv_cb = recv_cb;
if (attr->src_nentries) {
ce_state->src_ring = ath10k_ce_alloc_src_ring(ar, ce_id, attr);
if (IS_ERR(ce_state->src_ring)) {
......
......@@ -192,15 +192,21 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
unsigned int *nbytesp,
unsigned int *transfer_idp);
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
/*==================CE Engine Initialization=======================*/
int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
const struct ce_attr *attr,
void (*send_cb)(struct ath10k_ce_pipe *),
void (*recv_cb)(struct ath10k_ce_pipe *));
const struct ce_attr *attr);
void ath10k_ce_deinit_pipe(struct ath10k *ar, unsigned int ce_id);
int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
const struct ce_attr *attr);
const struct ce_attr *attr,
void (*send_cb)(struct ath10k_ce_pipe *),
void (*recv_cb)(struct ath10k_ce_pipe *));
void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id);
/*==================CE Engine Shutdown=======================*/
......@@ -213,6 +219,13 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp);
int ath10k_ce_completed_recv_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp,
unsigned int *flagsp);
/*
* Support clean shutdown by allowing the caller to cancel
* pending sends. Target DMA must be stopped before using
......
......@@ -138,7 +138,8 @@ static const struct firmware *ath10k_fetch_fw_file(struct ath10k *ar,
return fw;
}
static int ath10k_push_board_ext_data(struct ath10k *ar)
static int ath10k_push_board_ext_data(struct ath10k *ar, const void *data,
size_t data_len)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 board_ext_data_size = QCA988X_BOARD_EXT_DATA_SZ;
......@@ -159,14 +160,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
if (board_ext_data_addr == 0)
return 0;
if (ar->board_len != (board_data_size + board_ext_data_size)) {
if (data_len != (board_data_size + board_ext_data_size)) {
ath10k_err(ar, "invalid board (ext) data sizes %zu != %d+%d\n",
ar->board_len, board_data_size, board_ext_data_size);
data_len, board_data_size, board_ext_data_size);
return -EINVAL;
}
ret = ath10k_bmi_write_memory(ar, board_ext_data_addr,
ar->board_data + board_data_size,
data + board_data_size,
board_ext_data_size);
if (ret) {
ath10k_err(ar, "could not write board ext data (%d)\n", ret);
......@@ -184,13 +185,14 @@ static int ath10k_push_board_ext_data(struct ath10k *ar)
return 0;
}
static int ath10k_download_board_data(struct ath10k *ar)
static int ath10k_download_board_data(struct ath10k *ar, const void *data,
size_t data_len)
{
u32 board_data_size = QCA988X_BOARD_DATA_SZ;
u32 address;
int ret;
ret = ath10k_push_board_ext_data(ar);
ret = ath10k_push_board_ext_data(ar, data, data_len);
if (ret) {
ath10k_err(ar, "could not push board ext data (%d)\n", ret);
goto exit;
......@@ -202,9 +204,9 @@ static int ath10k_download_board_data(struct ath10k *ar)
goto exit;
}
ret = ath10k_bmi_write_memory(ar, address, ar->board_data,
ret = ath10k_bmi_write_memory(ar, address, data,
min_t(u32, board_data_size,
ar->board_len));
data_len));
if (ret) {
ath10k_err(ar, "could not write board data (%d)\n", ret);
goto exit;
......@@ -220,11 +222,39 @@ static int ath10k_download_board_data(struct ath10k *ar)
return ret;
}
static int ath10k_download_cal_file(struct ath10k *ar)
{
int ret;
if (!ar->cal_file)
return -ENOENT;
if (IS_ERR(ar->cal_file))
return PTR_ERR(ar->cal_file);
ret = ath10k_download_board_data(ar, ar->cal_file->data,
ar->cal_file->size);
if (ret) {
ath10k_err(ar, "failed to download cal_file data: %d\n", ret);
return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cal file downloaded\n");
return 0;
}
static int ath10k_download_and_run_otp(struct ath10k *ar)
{
u32 result, address = ar->hw_params.patch_load_addr;
int ret;
ret = ath10k_download_board_data(ar, ar->board_data, ar->board_len);
if (ret) {
ath10k_err(ar, "failed to download board data: %d\n", ret);
return ret;
}
/* OTP is optional */
if (!ar->otp_data || !ar->otp_len) {
......@@ -308,6 +338,9 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
if (ar->firmware && !IS_ERR(ar->firmware))
release_firmware(ar->firmware);
if (ar->cal_file && !IS_ERR(ar->cal_file))
release_firmware(ar->cal_file);
ar->board = NULL;
ar->board_data = NULL;
ar->board_len = 0;
......@@ -319,6 +352,27 @@ static void ath10k_core_free_firmware_files(struct ath10k *ar)
ar->firmware = NULL;
ar->firmware_data = NULL;
ar->firmware_len = 0;
ar->cal_file = NULL;
}
static int ath10k_fetch_cal_file(struct ath10k *ar)
{
char filename[100];
/* cal-<bus>-<id>.bin */
scnprintf(filename, sizeof(filename), "cal-%s-%s.bin",
ath10k_bus_str(ar->hif.bus), dev_name(ar->dev));
ar->cal_file = ath10k_fetch_fw_file(ar, ATH10K_FW_DIR, filename);
if (IS_ERR(ar->cal_file))
/* calibration file is optional, don't print any warnings */
return PTR_ERR(ar->cal_file);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "found calibration file %s/%s\n",
ATH10K_FW_DIR, filename);
return 0;
}
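As an aside, the filename built by the scnprintf() above is just the bus string plus dev_name(). A minimal userspace sketch of that composition, assuming a PCI device whose dev_name() is "0000:02:00.0" (a hypothetical example) and ATH10K_FW_DIR being "ath10k":

/* Illustration only: how "cal-<bus>-<id>.bin" is composed for an assumed
 * PCI device. Neither the device name nor this program is part of the patch.
 */
#include <stdio.h>

int main(void)
{
	char filename[100];
	const char *bus = "pci";          /* ath10k_bus_str(ATH10K_BUS_PCI) */
	const char *dev = "0000:02:00.0"; /* assumed dev_name() of a PCI function */

	snprintf(filename, sizeof(filename), "cal-%s-%s.bin", bus, dev);
	printf("ath10k/%s\n", filename);  /* => ath10k/cal-pci-0000:02:00.0.bin */
	return 0;
}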
static int ath10k_core_fetch_firmware_api_1(struct ath10k *ar)
......@@ -562,6 +616,9 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
{
int ret;
/* calibration file is optional, don't check for any errors */
ath10k_fetch_cal_file(ar);
ar->fw_api = 3;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "trying fw api %d\n", ar->fw_api);
......@@ -589,30 +646,32 @@ static int ath10k_core_fetch_firmware_files(struct ath10k *ar)
return 0;
}
static int ath10k_init_download_firmware(struct ath10k *ar,
enum ath10k_firmware_mode mode)
static int ath10k_download_cal_data(struct ath10k *ar)
{
int ret;
ret = ath10k_download_board_data(ar);
if (ret) {
ath10k_err(ar, "failed to download board data: %d\n", ret);
return ret;
ret = ath10k_download_cal_file(ar);
if (ret == 0) {
ar->cal_mode = ATH10K_CAL_MODE_FILE;
goto done;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT,
"boot did not find a calibration file, try OTP next: %d\n",
ret);
ret = ath10k_download_and_run_otp(ar);
if (ret) {
ath10k_err(ar, "failed to run otp: %d\n", ret);
return ret;
}
ret = ath10k_download_fw(ar, mode);
if (ret) {
ath10k_err(ar, "failed to download firmware: %d\n", ret);
return ret;
}
ar->cal_mode = ATH10K_CAL_MODE_OTP;
return ret;
done:
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot using calibration mode %s\n",
ath10k_cal_mode_str(ar->cal_mode));
return 0;
}
static int ath10k_init_uart(struct ath10k *ar)
......@@ -729,7 +788,11 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err;
}
status = ath10k_init_download_firmware(ar, mode);
status = ath10k_download_cal_data(ar);
if (status)
goto err;
status = ath10k_download_fw(ar, mode);
if (status)
goto err;
......@@ -846,9 +909,9 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err_hif_stop;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
ar->free_vdev_map = (1 << TARGET_10X_NUM_VDEVS) - 1;
ar->free_vdev_map = (1LL << TARGET_10X_NUM_VDEVS) - 1;
else
ar->free_vdev_map = (1 << TARGET_NUM_VDEVS) - 1;
ar->free_vdev_map = (1LL << TARGET_NUM_VDEVS) - 1;
INIT_LIST_HEAD(&ar->arvifs);
......@@ -1084,6 +1147,7 @@ void ath10k_core_unregister(struct ath10k *ar)
EXPORT_SYMBOL(ath10k_core_unregister);
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops)
{
struct ath10k *ar;
......@@ -1100,6 +1164,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->dev = dev;
ar->hif.ops = hif_ops;
ar->hif.bus = bus;
init_completion(&ar->scan.started);
init_completion(&ar->scan.completed);
......
......@@ -63,6 +63,20 @@
struct ath10k;
enum ath10k_bus {
ATH10K_BUS_PCI,
};
static inline const char *ath10k_bus_str(enum ath10k_bus bus)
{
switch (bus) {
case ATH10K_BUS_PCI:
return "pci";
}
return "unknown";
}
struct ath10k_skb_cb {
dma_addr_t paddr;
u8 vdev_id;
......@@ -96,8 +110,6 @@ struct ath10k_bmi {
bool done_sent;
};
#define ATH10K_MAX_MEM_REQS 16
struct ath10k_mem_chunk {
void *vaddr;
dma_addr_t paddr;
......@@ -115,17 +127,21 @@ struct ath10k_wmi {
struct wmi_pdev_param_map *pdev_param;
u32 num_mem_chunks;
struct ath10k_mem_chunk mem_chunks[ATH10K_MAX_MEM_REQS];
struct ath10k_mem_chunk mem_chunks[WMI_MAX_MEM_REQS];
};
struct ath10k_peer_stat {
struct ath10k_fw_stats_peer {
struct list_head list;
u8 peer_macaddr[ETH_ALEN];
u32 peer_rssi;
u32 peer_tx_rate;
u32 peer_rx_rate; /* 10x only */
};
struct ath10k_target_stats {
struct ath10k_fw_stats_pdev {
struct list_head list;
/* PDEV stats */
s32 ch_noise_floor;
u32 tx_frame_count;
......@@ -180,15 +196,11 @@ struct ath10k_target_stats {
s32 phy_errs;
s32 phy_err_drop;
s32 mpdu_errs;
};
/* VDEV STATS */
/* PEER STATS */
u8 peers;
struct ath10k_peer_stat peer_stat[TARGET_NUM_PEERS];
/* TODO: Beacon filter stats */
struct ath10k_fw_stats {
struct list_head pdevs;
struct list_head peers;
};
struct ath10k_dfs_stats {
......@@ -234,6 +246,8 @@ struct ath10k_vif {
struct sk_buff *beacon;
/* protected by data_lock */
bool beacon_sent;
void *beacon_buf;
dma_addr_t beacon_paddr;
struct ath10k *ar;
struct ieee80211_vif *vif;
......@@ -273,6 +287,7 @@ struct ath10k_vif {
u8 force_sgi;
bool use_cts_prot;
int num_legacy_stations;
int txpower;
};
struct ath10k_vif_iter {
......@@ -292,17 +307,19 @@ struct ath10k_fw_crash_data {
struct ath10k_debug {
struct dentry *debugfs_phy;
struct ath10k_target_stats target_stats;
struct ath10k_fw_stats fw_stats;
struct completion fw_stats_complete;
bool fw_stats_done;
DECLARE_BITMAP(wmi_service_bitmap, WMI_SERVICE_MAX);
struct completion event_stats_compl;
unsigned long htt_stats_mask;
struct delayed_work htt_stats_dwork;
struct ath10k_dfs_stats dfs_stats;
struct ath_dfs_pool_stats dfs_pool_stats;
/* protected by conf_mutex */
u32 fw_dbglog_mask;
u32 pktlog_filter;
u8 htt_max_amsdu;
u8 htt_max_ampdu;
......@@ -371,6 +388,23 @@ enum ath10k_dev_flags {
ATH10K_FLAG_CORE_REGISTERED,
};
enum ath10k_cal_mode {
ATH10K_CAL_MODE_FILE,
ATH10K_CAL_MODE_OTP,
};
static inline const char *ath10k_cal_mode_str(enum ath10k_cal_mode mode)
{
switch (mode) {
case ATH10K_CAL_MODE_FILE:
return "file";
case ATH10K_CAL_MODE_OTP:
return "otp";
}
return "unknown";
}
enum ath10k_scan_state {
ATH10K_SCAN_IDLE,
ATH10K_SCAN_STARTING,
......@@ -421,6 +455,7 @@ struct ath10k {
bool p2p;
struct {
enum ath10k_bus bus;
const struct ath10k_hif_ops *ops;
} hif;
......@@ -456,7 +491,10 @@ struct ath10k {
const void *firmware_data;
size_t firmware_len;
const struct firmware *cal_file;
int fw_api;
enum ath10k_cal_mode cal_mode;
struct {
struct completion started;
......@@ -482,7 +520,7 @@ struct ath10k {
/* current operating channel definition */
struct cfg80211_chan_def chandef;
int free_vdev_map;
unsigned long long free_vdev_map;
bool monitor;
int monitor_vdev_id;
bool monitor_started;
......@@ -563,11 +601,19 @@ struct ath10k {
bool utf_monitor;
} testmode;
struct {
/* protected by data_lock */
u32 fw_crash_counter;
u32 fw_warm_reset_counter;
u32 fw_cold_reset_counter;
} stats;
/* must be last */
u8 drv_priv[0] __aligned(sizeof(void *));
};
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus,
const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar);
......
......@@ -23,6 +23,7 @@
#include "core.h"
#include "debug.h"
#include "hif.h"
/* ms */
#define ATH10K_DEBUG_HTT_STATS_INTERVAL 1000
......@@ -106,34 +107,36 @@ struct ath10k_dump_file_data {
u8 data[0];
} __packed;
int ath10k_info(struct ath10k *ar, const char *fmt, ...)
void ath10k_info(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret;
va_start(args, fmt);
vaf.va = &args;
ret = dev_info(ar->dev, "%pV", &vaf);
dev_info(ar->dev, "%pV", &vaf);
trace_ath10k_log_info(ar, &vaf);
va_end(args);
return ret;
}
EXPORT_SYMBOL(ath10k_info);
void ath10k_print_driver_info(struct ath10k *ar)
{
ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d\n",
ath10k_info(ar, "%s (0x%08x, 0x%08x) fw %s api %d htt %d.%d wmi %d.%d.%d.%d cal %s\n",
ar->hw_params.name,
ar->target_version,
ar->chip_id,
ar->hw->wiphy->fw_version,
ar->fw_api,
ar->htt.target_version_major,
ar->htt.target_version_minor);
ar->htt.target_version_minor,
ar->fw_version_major,
ar->fw_version_minor,
ar->fw_version_release,
ar->fw_version_build,
ath10k_cal_mode_str(ar->cal_mode));
ath10k_info(ar, "debug %d debugfs %d tracing %d dfs %d testmode %d\n",
config_enabled(CONFIG_ATH10K_DEBUG),
config_enabled(CONFIG_ATH10K_DEBUGFS),
......@@ -143,25 +146,22 @@ void ath10k_print_driver_info(struct ath10k *ar)
}
EXPORT_SYMBOL(ath10k_print_driver_info);
int ath10k_err(struct ath10k *ar, const char *fmt, ...)
void ath10k_err(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret;
va_start(args, fmt);
vaf.va = &args;
ret = dev_err(ar->dev, "%pV", &vaf);
dev_err(ar->dev, "%pV", &vaf);
trace_ath10k_log_err(ar, &vaf);
va_end(args);
return ret;
}
EXPORT_SYMBOL(ath10k_err);
int ath10k_warn(struct ath10k *ar, const char *fmt, ...)
void ath10k_warn(struct ath10k *ar, const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
......@@ -174,15 +174,13 @@ int ath10k_warn(struct ath10k *ar, const char *fmt, ...)
trace_ath10k_log_warn(ar, &vaf);
va_end(args);
return 0;
}
EXPORT_SYMBOL(ath10k_warn);
#ifdef CONFIG_ATH10K_DEBUGFS
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
const void *service_map,
size_t map_size)
{
memcpy(ar->debug.wmi_service_bitmap, service_map, map_size);
......@@ -242,169 +240,182 @@ static const struct file_operations fops_wmi_services = {
.llseek = default_llseek,
};
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev)
static void ath10k_debug_fw_stats_pdevs_free(struct list_head *head)
{
u8 *tmp = ev->data;
struct ath10k_target_stats *stats;
int num_pdev_stats, num_vdev_stats, num_peer_stats;
struct wmi_pdev_stats_10x *ps;
int i;
struct ath10k_fw_stats_pdev *i, *tmp;
list_for_each_entry_safe(i, tmp, head, list) {
list_del(&i->list);
kfree(i);
}
}
static void ath10k_debug_fw_stats_peers_free(struct list_head *head)
{
struct ath10k_fw_stats_peer *i, *tmp;
list_for_each_entry_safe(i, tmp, head, list) {
list_del(&i->list);
kfree(i);
}
}
static void ath10k_debug_fw_stats_reset(struct ath10k *ar)
{
spin_lock_bh(&ar->data_lock);
ar->debug.fw_stats_done = false;
ath10k_debug_fw_stats_pdevs_free(&ar->debug.fw_stats.pdevs);
ath10k_debug_fw_stats_peers_free(&ar->debug.fw_stats.peers);
spin_unlock_bh(&ar->data_lock);
}
stats = &ar->debug.target_stats;
num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats); /* 0 or 1 */
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats); /* 0 or max vdevs */
num_peer_stats = __le32_to_cpu(ev->num_peer_stats); /* 0 or max peers */
if (num_pdev_stats) {
ps = (struct wmi_pdev_stats_10x *)tmp;
stats->ch_noise_floor = __le32_to_cpu(ps->chan_nf);
stats->tx_frame_count = __le32_to_cpu(ps->tx_frame_count);
stats->rx_frame_count = __le32_to_cpu(ps->rx_frame_count);
stats->rx_clear_count = __le32_to_cpu(ps->rx_clear_count);
stats->cycle_count = __le32_to_cpu(ps->cycle_count);
stats->phy_err_count = __le32_to_cpu(ps->phy_err_count);
stats->chan_tx_power = __le32_to_cpu(ps->chan_tx_pwr);
stats->comp_queued = __le32_to_cpu(ps->wal.tx.comp_queued);
stats->comp_delivered =
__le32_to_cpu(ps->wal.tx.comp_delivered);
stats->msdu_enqued = __le32_to_cpu(ps->wal.tx.msdu_enqued);
stats->mpdu_enqued = __le32_to_cpu(ps->wal.tx.mpdu_enqued);
stats->wmm_drop = __le32_to_cpu(ps->wal.tx.wmm_drop);
stats->local_enqued = __le32_to_cpu(ps->wal.tx.local_enqued);
stats->local_freed = __le32_to_cpu(ps->wal.tx.local_freed);
stats->hw_queued = __le32_to_cpu(ps->wal.tx.hw_queued);
stats->hw_reaped = __le32_to_cpu(ps->wal.tx.hw_reaped);
stats->underrun = __le32_to_cpu(ps->wal.tx.underrun);
stats->tx_abort = __le32_to_cpu(ps->wal.tx.tx_abort);
stats->mpdus_requed = __le32_to_cpu(ps->wal.tx.mpdus_requed);
stats->tx_ko = __le32_to_cpu(ps->wal.tx.tx_ko);
stats->data_rc = __le32_to_cpu(ps->wal.tx.data_rc);
stats->self_triggers = __le32_to_cpu(ps->wal.tx.self_triggers);
stats->sw_retry_failure =
__le32_to_cpu(ps->wal.tx.sw_retry_failure);
stats->illgl_rate_phy_err =
__le32_to_cpu(ps->wal.tx.illgl_rate_phy_err);
stats->pdev_cont_xretry =
__le32_to_cpu(ps->wal.tx.pdev_cont_xretry);
stats->pdev_tx_timeout =
__le32_to_cpu(ps->wal.tx.pdev_tx_timeout);
stats->pdev_resets = __le32_to_cpu(ps->wal.tx.pdev_resets);
stats->phy_underrun = __le32_to_cpu(ps->wal.tx.phy_underrun);
stats->txop_ovf = __le32_to_cpu(ps->wal.tx.txop_ovf);
stats->mid_ppdu_route_change =
__le32_to_cpu(ps->wal.rx.mid_ppdu_route_change);
stats->status_rcvd = __le32_to_cpu(ps->wal.rx.status_rcvd);
stats->r0_frags = __le32_to_cpu(ps->wal.rx.r0_frags);
stats->r1_frags = __le32_to_cpu(ps->wal.rx.r1_frags);
stats->r2_frags = __le32_to_cpu(ps->wal.rx.r2_frags);
stats->r3_frags = __le32_to_cpu(ps->wal.rx.r3_frags);
stats->htt_msdus = __le32_to_cpu(ps->wal.rx.htt_msdus);
stats->htt_mpdus = __le32_to_cpu(ps->wal.rx.htt_mpdus);
stats->loc_msdus = __le32_to_cpu(ps->wal.rx.loc_msdus);
stats->loc_mpdus = __le32_to_cpu(ps->wal.rx.loc_mpdus);
stats->oversize_amsdu =
__le32_to_cpu(ps->wal.rx.oversize_amsdu);
stats->phy_errs = __le32_to_cpu(ps->wal.rx.phy_errs);
stats->phy_err_drop = __le32_to_cpu(ps->wal.rx.phy_err_drop);
stats->mpdu_errs = __le32_to_cpu(ps->wal.rx.mpdu_errs);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
ar->fw_features)) {
stats->ack_rx_bad = __le32_to_cpu(ps->ack_rx_bad);
stats->rts_bad = __le32_to_cpu(ps->rts_bad);
stats->rts_good = __le32_to_cpu(ps->rts_good);
stats->fcs_bad = __le32_to_cpu(ps->fcs_bad);
stats->no_beacons = __le32_to_cpu(ps->no_beacons);
stats->mib_int_count = __le32_to_cpu(ps->mib_int_count);
tmp += sizeof(struct wmi_pdev_stats_10x);
} else {
tmp += sizeof(struct wmi_pdev_stats_old);
}
static size_t ath10k_debug_fw_stats_num_peers(struct list_head *head)
{
struct ath10k_fw_stats_peer *i;
size_t num = 0;
list_for_each_entry(i, head, list)
++num;
return num;
}
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb)
{
struct ath10k_fw_stats stats = {};
bool is_start, is_started, is_end;
size_t num_peers;
int ret;
INIT_LIST_HEAD(&stats.pdevs);
INIT_LIST_HEAD(&stats.peers);
spin_lock_bh(&ar->data_lock);
ret = ath10k_wmi_pull_fw_stats(ar, skb, &stats);
if (ret) {
ath10k_warn(ar, "failed to pull fw stats: %d\n", ret);
goto unlock;
}
/* 0 or max vdevs */
/* Currently firmware does not support VDEV stats */
if (num_vdev_stats) {
struct wmi_vdev_stats *vdev_stats;
/* Stat data may exceed the htc-wmi buffer limit. In such a case firmware
* splits the stats data and delivers it in a ping-pong fashion of
* request-cmd / update-event exchanges.
*
* However there is no explicit end-of-data. Instead start-of-data is
* used as an implicit one. This works as follows:
* a) discard stat update events until one with pdev stats is
* delivered - this skips the session started at the end of (b)
* b) consume stat update events until another one with pdev stats is
* delivered, which is treated as end-of-data and is itself discarded
*/
for (i = 0; i < num_vdev_stats; i++) {
vdev_stats = (struct wmi_vdev_stats *)tmp;
tmp += sizeof(struct wmi_vdev_stats);
}
if (ar->debug.fw_stats_done) {
ath10k_warn(ar, "received unsolicited stats update event\n");
goto free;
}
if (num_peer_stats) {
struct wmi_peer_stats_10x *peer_stats;
struct ath10k_peer_stat *s;
stats->peers = num_peer_stats;
for (i = 0; i < num_peer_stats; i++) {
peer_stats = (struct wmi_peer_stats_10x *)tmp;
s = &stats->peer_stat[i];
memcpy(s->peer_macaddr, &peer_stats->peer_macaddr.addr,
ETH_ALEN);
s->peer_rssi = __le32_to_cpu(peer_stats->peer_rssi);
s->peer_tx_rate =
__le32_to_cpu(peer_stats->peer_tx_rate);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X,
ar->fw_features)) {
s->peer_rx_rate =
__le32_to_cpu(peer_stats->peer_rx_rate);
tmp += sizeof(struct wmi_peer_stats_10x);
} else {
tmp += sizeof(struct wmi_peer_stats_old);
}
num_peers = ath10k_debug_fw_stats_num_peers(&ar->debug.fw_stats.peers);
is_start = (list_empty(&ar->debug.fw_stats.pdevs) &&
!list_empty(&stats.pdevs));
is_end = (!list_empty(&ar->debug.fw_stats.pdevs) &&
!list_empty(&stats.pdevs));
if (is_start)
list_splice_tail_init(&stats.pdevs, &ar->debug.fw_stats.pdevs);
if (is_end)
ar->debug.fw_stats_done = true;
is_started = !list_empty(&ar->debug.fw_stats.pdevs);
if (is_started && !is_end) {
if (num_peers >= ATH10K_MAX_NUM_PEER_IDS) {
/* Although this is unlikely, impose a sane limit to
* prevent firmware from DoS-ing the host.
*/
ath10k_warn(ar, "dropping fw peer stats\n");
goto free;
}
list_splice_tail_init(&stats.peers, &ar->debug.fw_stats.peers);
}
complete(&ar->debug.fw_stats_complete);
free:
/* In some cases lists have been spliced and cleared. Free up
* resources if that is not the case.
*/
ath10k_debug_fw_stats_pdevs_free(&stats.pdevs);
ath10k_debug_fw_stats_peers_free(&stats.peers);
unlock:
spin_unlock_bh(&ar->data_lock);
complete(&ar->debug.event_stats_compl);
}
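To make the implicit start/end detection above easier to follow, here is a minimal model of the decision logic as a standalone C sketch (not driver code; the pdev/peer lists are reduced to booleans) walking through a typical three-event update sequence:

/* Hedged model of ath10k_debug_fw_stats_process() start/end handling.
 * "have_pdevs" stands for !list_empty(&ar->debug.fw_stats.pdevs) and
 * "ev_has_pdevs" for !list_empty(&stats.pdevs) in the real driver.
 */
#include <stdbool.h>
#include <stdio.h>

struct model {
	bool have_pdevs;	/* accumulated pdev stats present */
	bool done;		/* fw_stats_done */
};

static void process(struct model *m, bool ev_has_pdevs, bool ev_has_peers,
		    const char *name)
{
	bool is_start = !m->have_pdevs && ev_has_pdevs;
	bool is_end = m->have_pdevs && ev_has_pdevs;

	if (is_start)
		m->have_pdevs = true;	/* splice the event's pdev list */
	if (is_end)
		m->done = true;		/* the event itself is discarded */

	printf("%s: start=%d end=%d append_peers=%d done=%d\n", name,
	       is_start, is_end,
	       m->have_pdevs && !is_end && ev_has_peers, m->done);
}

int main(void)
{
	struct model m = { false, false };

	process(&m, true, true, "event 1 (pdev + peer stats)");  /* implicit start */
	process(&m, false, true, "event 2 (peer stats only)");   /* peers appended */
	process(&m, true, false, "event 3 (pdev stats again)");  /* implicit end, dropped */
	return 0;
}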
static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
static int ath10k_debug_fw_stats_request(struct ath10k *ar)
{
struct ath10k *ar = file->private_data;
struct ath10k_target_stats *fw_stats;
char *buf = NULL;
unsigned int len = 0, buf_len = 8000;
ssize_t ret_cnt = 0;
long left;
int i;
unsigned long timeout;
int ret;
fw_stats = &ar->debug.target_stats;
lockdep_assert_held(&ar->conf_mutex);
mutex_lock(&ar->conf_mutex);
timeout = jiffies + msecs_to_jiffies(1*HZ);
if (ar->state != ATH10K_STATE_ON)
goto exit;
ath10k_debug_fw_stats_reset(ar);
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
goto exit;
for (;;) {
if (time_after(jiffies, timeout))
return -ETIMEDOUT;
ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
if (ret) {
ath10k_warn(ar, "could not request stats (%d)\n", ret);
goto exit;
reinit_completion(&ar->debug.fw_stats_complete);
ret = ath10k_wmi_request_stats(ar, WMI_REQUEST_PEER_STAT);
if (ret) {
ath10k_warn(ar, "could not request stats (%d)\n", ret);
return ret;
}
ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
1*HZ);
if (ret <= 0)
return -ETIMEDOUT;
spin_lock_bh(&ar->data_lock);
if (ar->debug.fw_stats_done) {
spin_unlock_bh(&ar->data_lock);
break;
}
spin_unlock_bh(&ar->data_lock);
}
left = wait_for_completion_timeout(&ar->debug.event_stats_compl, 1*HZ);
if (left <= 0)
goto exit;
return 0;
}
/* FIXME: How to calculate the buffer size sanely? */
#define ATH10K_FW_STATS_BUF_SIZE (1024*1024)
static void ath10k_fw_stats_fill(struct ath10k *ar,
struct ath10k_fw_stats *fw_stats,
char *buf)
{
unsigned int len = 0;
unsigned int buf_len = ATH10K_FW_STATS_BUF_SIZE;
const struct ath10k_fw_stats_pdev *pdev;
const struct ath10k_fw_stats_peer *peer;
size_t num_peers;
spin_lock_bh(&ar->data_lock);
pdev = list_first_entry_or_null(&fw_stats->pdevs,
struct ath10k_fw_stats_pdev, list);
if (!pdev) {
ath10k_warn(ar, "failed to get pdev stats\n");
goto unlock;
}
num_peers = ath10k_debug_fw_stats_num_peers(&fw_stats->peers);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
"ath10k PDEV stats");
......@@ -412,29 +423,29 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Channel noise floor", fw_stats->ch_noise_floor);
"Channel noise floor", pdev->ch_noise_floor);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Channel TX power", fw_stats->chan_tx_power);
"Channel TX power", pdev->chan_tx_power);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"TX frame count", fw_stats->tx_frame_count);
"TX frame count", pdev->tx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RX frame count", fw_stats->rx_frame_count);
"RX frame count", pdev->rx_frame_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RX clear count", fw_stats->rx_clear_count);
"RX clear count", pdev->rx_clear_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"Cycle count", fw_stats->cycle_count);
"Cycle count", pdev->cycle_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"PHY error count", fw_stats->phy_err_count);
"PHY error count", pdev->phy_err_count);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RTS bad count", fw_stats->rts_bad);
"RTS bad count", pdev->rts_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"RTS good count", fw_stats->rts_good);
"RTS good count", pdev->rts_good);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"FCS bad count", fw_stats->fcs_bad);
"FCS bad count", pdev->fcs_bad);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"No beacon count", fw_stats->no_beacons);
"No beacon count", pdev->no_beacons);
len += scnprintf(buf + len, buf_len - len, "%30s %10u\n",
"MIB int count", fw_stats->mib_int_count);
"MIB int count", pdev->mib_int_count);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
......@@ -443,51 +454,51 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
"=================");
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HTT cookies queued", fw_stats->comp_queued);
"HTT cookies queued", pdev->comp_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HTT cookies disp.", fw_stats->comp_delivered);
"HTT cookies disp.", pdev->comp_delivered);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDU queued", fw_stats->msdu_enqued);
"MSDU queued", pdev->msdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU queued", fw_stats->mpdu_enqued);
"MPDU queued", pdev->mpdu_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs dropped", fw_stats->wmm_drop);
"MSDUs dropped", pdev->wmm_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Local enqued", fw_stats->local_enqued);
"Local enqued", pdev->local_enqued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Local freed", fw_stats->local_freed);
"Local freed", pdev->local_freed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HW queued", fw_stats->hw_queued);
"HW queued", pdev->hw_queued);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs reaped", fw_stats->hw_reaped);
"PPDUs reaped", pdev->hw_reaped);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Num underruns", fw_stats->underrun);
"Num underruns", pdev->underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PPDUs cleaned", fw_stats->tx_abort);
"PPDUs cleaned", pdev->tx_abort);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs requed", fw_stats->mpdus_requed);
"MPDUs requed", pdev->mpdus_requed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Excessive retries", fw_stats->tx_ko);
"Excessive retries", pdev->tx_ko);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HW rate", fw_stats->data_rc);
"HW rate", pdev->data_rc);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Sched self tiggers", fw_stats->self_triggers);
"Sched self tiggers", pdev->self_triggers);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Dropped due to SW retries",
fw_stats->sw_retry_failure);
pdev->sw_retry_failure);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Illegal rate phy errors",
fw_stats->illgl_rate_phy_err);
pdev->illgl_rate_phy_err);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Pdev continous xretry", fw_stats->pdev_cont_xretry);
"Pdev continous xretry", pdev->pdev_cont_xretry);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"TX timeout", fw_stats->pdev_tx_timeout);
"TX timeout", pdev->pdev_tx_timeout);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PDEV resets", fw_stats->pdev_resets);
"PDEV resets", pdev->pdev_resets);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY underrun", fw_stats->phy_underrun);
"PHY underrun", pdev->phy_underrun);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU is more than txop limit", fw_stats->txop_ovf);
"MPDU is more than txop limit", pdev->txop_ovf);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s\n",
......@@ -497,70 +508,161 @@ static ssize_t ath10k_read_fw_stats(struct file *file, char __user *user_buf,
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Mid PPDU route change",
fw_stats->mid_ppdu_route_change);
pdev->mid_ppdu_route_change);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Tot. number of statuses", fw_stats->status_rcvd);
"Tot. number of statuses", pdev->status_rcvd);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 0", fw_stats->r0_frags);
"Extra frags on rings 0", pdev->r0_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 1", fw_stats->r1_frags);
"Extra frags on rings 1", pdev->r1_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 2", fw_stats->r2_frags);
"Extra frags on rings 2", pdev->r2_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Extra frags on rings 3", fw_stats->r3_frags);
"Extra frags on rings 3", pdev->r3_frags);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs delivered to HTT", fw_stats->htt_msdus);
"MSDUs delivered to HTT", pdev->htt_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to HTT", fw_stats->htt_mpdus);
"MPDUs delivered to HTT", pdev->htt_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MSDUs delivered to stack", fw_stats->loc_msdus);
"MSDUs delivered to stack", pdev->loc_msdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs delivered to stack", fw_stats->loc_mpdus);
"MPDUs delivered to stack", pdev->loc_mpdus);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Oversized AMSUs", fw_stats->oversize_amsdu);
"Oversized AMSUs", pdev->oversize_amsdu);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors", fw_stats->phy_errs);
"PHY errors", pdev->phy_errs);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"PHY errors drops", fw_stats->phy_err_drop);
"PHY errors drops", pdev->phy_err_drop);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDU errors (FCS, MIC, ENC)", fw_stats->mpdu_errs);
"MPDU errors (FCS, MIC, ENC)", pdev->mpdu_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s (%d)\n",
"ath10k PEER stats", fw_stats->peers);
len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
"ath10k PEER stats", num_peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
for (i = 0; i < fw_stats->peers; i++) {
list_for_each_entry(peer, &fw_stats->peers, list) {
len += scnprintf(buf + len, buf_len - len, "%30s %pM\n",
"Peer MAC address",
fw_stats->peer_stat[i].peer_macaddr);
"Peer MAC address", peer->peer_macaddr);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RSSI", fw_stats->peer_stat[i].peer_rssi);
"Peer RSSI", peer->peer_rssi);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer TX rate",
fw_stats->peer_stat[i].peer_tx_rate);
"Peer TX rate", peer->peer_tx_rate);
len += scnprintf(buf + len, buf_len - len, "%30s %u\n",
"Peer RX rate",
fw_stats->peer_stat[i].peer_rx_rate);
"Peer RX rate", peer->peer_rx_rate);
len += scnprintf(buf + len, buf_len - len, "\n");
}
unlock:
spin_unlock_bh(&ar->data_lock);
if (len > buf_len)
len = buf_len;
if (len >= buf_len)
buf[len - 1] = 0;
else
buf[len] = 0;
}
ret_cnt = simple_read_from_buffer(user_buf, count, ppos, buf, len);
static int ath10k_fw_stats_open(struct inode *inode, struct file *file)
{
struct ath10k *ar = inode->i_private;
void *buf = NULL;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
ret = -ENETDOWN;
goto err_unlock;
}
buf = vmalloc(ATH10K_FW_STATS_BUF_SIZE);
if (!buf) {
ret = -ENOMEM;
goto err_unlock;
}
ret = ath10k_debug_fw_stats_request(ar);
if (ret) {
ath10k_warn(ar, "failed to request fw stats: %d\n", ret);
goto err_free;
}
ath10k_fw_stats_fill(ar, &ar->debug.fw_stats, buf);
file->private_data = buf;
exit:
mutex_unlock(&ar->conf_mutex);
kfree(buf);
return ret_cnt;
return 0;
err_free:
vfree(buf);
err_unlock:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static int ath10k_fw_stats_release(struct inode *inode, struct file *file)
{
vfree(file->private_data);
return 0;
}
static ssize_t ath10k_fw_stats_read(struct file *file, char __user *user_buf,
size_t count, loff_t *ppos)
{
const char *buf = file->private_data;
unsigned int len = strlen(buf);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static const struct file_operations fops_fw_stats = {
.read = ath10k_read_fw_stats,
.open = ath10k_fw_stats_open,
.release = ath10k_fw_stats_release,
.read = ath10k_fw_stats_read,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
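With the open/read/release split above, the full report is produced once at open() time (stats are requested from firmware and formatted into the vmalloc'd buffer) and reads are then served straight from that buffer. A hedged userspace sketch of consuming the file; the debugfs path shown is the usual ath10k location and is an assumption here:

/* Sketch only: dump the fw_stats debugfs file. The phy name and debugfs
 * mount point are assumed and may differ on a given system. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/kernel/debug/ieee80211/phy0/ath10k/fw_stats"; /* assumed */
	char line[512];
	FILE *f = fopen(path, "r");

	if (!f) {
		perror("fopen");  /* e.g. ENETDOWN if the device is not started */
		return 1;
	}

	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return 0;
}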
static ssize_t ath10k_debug_fw_reset_stats_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
int ret, len, buf_len;
char *buf;
buf_len = 500;
buf = kmalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
spin_lock_bh(&ar->data_lock);
len = 0;
len += scnprintf(buf + len, buf_len - len,
"fw_crash_counter\t\t%d\n", ar->stats.fw_crash_counter);
len += scnprintf(buf + len, buf_len - len,
"fw_warm_reset_counter\t\t%d\n",
ar->stats.fw_warm_reset_counter);
len += scnprintf(buf + len, buf_len - len,
"fw_cold_reset_counter\t\t%d\n",
ar->stats.fw_cold_reset_counter);
spin_unlock_bh(&ar->data_lock);
ret = simple_read_from_buffer(user_buf, count, ppos, buf, len);
kfree(buf);
return ret;
}
static const struct file_operations fops_fw_reset_stats = {
.open = simple_open,
.read = ath10k_debug_fw_reset_stats_read,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
......@@ -1029,6 +1131,166 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
return ret;
}
/* TODO: Would be nice to always support ethtool stats, would need to
* move the stats storage out of ath10k_debug, or always have ath10k_debug
* struct available..
*/
/* This generally corresponds to the debugfs fw_stats file */
static const char ath10k_gstrings_stats[][ETH_GSTRING_LEN] = {
"tx_pkts_nic",
"tx_bytes_nic",
"rx_pkts_nic",
"rx_bytes_nic",
"d_noise_floor",
"d_cycle_count",
"d_phy_error",
"d_rts_bad",
"d_rts_good",
"d_tx_power", /* in .5 dbM I think */
"d_rx_crc_err", /* fcs_bad */
"d_no_beacon",
"d_tx_mpdus_queued",
"d_tx_msdu_queued",
"d_tx_msdu_dropped",
"d_local_enqued",
"d_local_freed",
"d_tx_ppdu_hw_queued",
"d_tx_ppdu_reaped",
"d_tx_fifo_underrun",
"d_tx_ppdu_abort",
"d_tx_mpdu_requed",
"d_tx_excessive_retries",
"d_tx_hw_rate",
"d_tx_dropped_sw_retries",
"d_tx_illegal_rate",
"d_tx_continuous_xretries",
"d_tx_timeout",
"d_tx_mpdu_txop_limit",
"d_pdev_resets",
"d_rx_mid_ppdu_route_change",
"d_rx_status",
"d_rx_extra_frags_ring0",
"d_rx_extra_frags_ring1",
"d_rx_extra_frags_ring2",
"d_rx_extra_frags_ring3",
"d_rx_msdu_htt",
"d_rx_mpdu_htt",
"d_rx_msdu_stack",
"d_rx_mpdu_stack",
"d_rx_phy_err",
"d_rx_phy_err_drops",
"d_rx_mpdu_errors", /* FCS, MIC, ENC */
"d_fw_crash_count",
"d_fw_warm_reset_count",
"d_fw_cold_reset_count",
};
#define ATH10K_SSTATS_LEN ARRAY_SIZE(ath10k_gstrings_stats)
void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u32 sset, u8 *data)
{
if (sset == ETH_SS_STATS)
memcpy(data, *ath10k_gstrings_stats,
sizeof(ath10k_gstrings_stats));
}
int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int sset)
{
if (sset == ETH_SS_STATS)
return ATH10K_SSTATS_LEN;
return 0;
}
void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ethtool_stats *stats, u64 *data)
{
struct ath10k *ar = hw->priv;
static const struct ath10k_fw_stats_pdev zero_stats = {};
const struct ath10k_fw_stats_pdev *pdev_stats;
int i = 0, ret;
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH10K_STATE_ON) {
ret = ath10k_debug_fw_stats_request(ar);
if (ret) {
/* just print a warning and try to use older results */
ath10k_warn(ar,
"failed to get fw stats for ethtool: %d\n",
ret);
}
}
pdev_stats = list_first_entry_or_null(&ar->debug.fw_stats.pdevs,
struct ath10k_fw_stats_pdev,
list);
if (!pdev_stats) {
/* no results available so just return zeroes */
pdev_stats = &zero_stats;
}
spin_lock_bh(&ar->data_lock);
data[i++] = pdev_stats->hw_reaped; /* ppdu reaped */
data[i++] = 0; /* tx bytes */
data[i++] = pdev_stats->htt_mpdus;
data[i++] = 0; /* rx bytes */
data[i++] = pdev_stats->ch_noise_floor;
data[i++] = pdev_stats->cycle_count;
data[i++] = pdev_stats->phy_err_count;
data[i++] = pdev_stats->rts_bad;
data[i++] = pdev_stats->rts_good;
data[i++] = pdev_stats->chan_tx_power;
data[i++] = pdev_stats->fcs_bad;
data[i++] = pdev_stats->no_beacons;
data[i++] = pdev_stats->mpdu_enqued;
data[i++] = pdev_stats->msdu_enqued;
data[i++] = pdev_stats->wmm_drop;
data[i++] = pdev_stats->local_enqued;
data[i++] = pdev_stats->local_freed;
data[i++] = pdev_stats->hw_queued;
data[i++] = pdev_stats->hw_reaped;
data[i++] = pdev_stats->underrun;
data[i++] = pdev_stats->tx_abort;
data[i++] = pdev_stats->mpdus_requed;
data[i++] = pdev_stats->tx_ko;
data[i++] = pdev_stats->data_rc;
data[i++] = pdev_stats->sw_retry_failure;
data[i++] = pdev_stats->illgl_rate_phy_err;
data[i++] = pdev_stats->pdev_cont_xretry;
data[i++] = pdev_stats->pdev_tx_timeout;
data[i++] = pdev_stats->txop_ovf;
data[i++] = pdev_stats->pdev_resets;
data[i++] = pdev_stats->mid_ppdu_route_change;
data[i++] = pdev_stats->status_rcvd;
data[i++] = pdev_stats->r0_frags;
data[i++] = pdev_stats->r1_frags;
data[i++] = pdev_stats->r2_frags;
data[i++] = pdev_stats->r3_frags;
data[i++] = pdev_stats->htt_msdus;
data[i++] = pdev_stats->htt_mpdus;
data[i++] = pdev_stats->loc_msdus;
data[i++] = pdev_stats->loc_mpdus;
data[i++] = pdev_stats->phy_errs;
data[i++] = pdev_stats->phy_err_drop;
data[i++] = pdev_stats->mpdu_errs;
data[i++] = ar->stats.fw_crash_counter;
data[i++] = ar->stats.fw_warm_reset_counter;
data[i++] = ar->stats.fw_cold_reset_counter;
spin_unlock_bh(&ar->data_lock);
mutex_unlock(&ar->conf_mutex);
WARN_ON(i != ATH10K_SSTATS_LEN);
}
static const struct file_operations fops_fw_dbglog = {
.read = ath10k_read_fw_dbglog,
.write = ath10k_write_fw_dbglog,
......@@ -1037,6 +1299,84 @@ static const struct file_operations fops_fw_dbglog = {
.llseek = default_llseek,
};
static int ath10k_debug_cal_data_open(struct inode *inode, struct file *file)
{
struct ath10k *ar = inode->i_private;
void *buf;
u32 hi_addr;
__le32 addr;
int ret;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON &&
ar->state != ATH10K_STATE_UTF) {
ret = -ENETDOWN;
goto err;
}
buf = vmalloc(QCA988X_CAL_DATA_LEN);
if (!buf) {
ret = -ENOMEM;
goto err;
}
hi_addr = host_interest_item_address(HI_ITEM(hi_board_data));
ret = ath10k_hif_diag_read(ar, hi_addr, &addr, sizeof(addr));
if (ret) {
ath10k_warn(ar, "failed to read hi_board_data address: %d\n", ret);
goto err_vfree;
}
ret = ath10k_hif_diag_read(ar, le32_to_cpu(addr), buf,
QCA988X_CAL_DATA_LEN);
if (ret) {
ath10k_warn(ar, "failed to read calibration data: %d\n", ret);
goto err_vfree;
}
file->private_data = buf;
mutex_unlock(&ar->conf_mutex);
return 0;
err_vfree:
vfree(buf);
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath10k_debug_cal_data_read(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
void *buf = file->private_data;
return simple_read_from_buffer(user_buf, count, ppos,
buf, QCA988X_CAL_DATA_LEN);
}
static int ath10k_debug_cal_data_release(struct inode *inode,
struct file *file)
{
vfree(file->private_data);
return 0;
}
static const struct file_operations fops_cal_data = {
.open = ath10k_debug_cal_data_open,
.read = ath10k_debug_cal_data_read,
.release = ath10k_debug_cal_data_release,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
int ath10k_debug_start(struct ath10k *ar)
{
int ret;
......@@ -1057,7 +1397,22 @@ int ath10k_debug_start(struct ath10k *ar)
ret);
}
return 0;
if (ar->debug.pktlog_filter) {
ret = ath10k_wmi_pdev_pktlog_enable(ar,
ar->debug.pktlog_filter);
if (ret)
/* not serious */
ath10k_warn(ar,
"failed to enable pktlog filter %x: %d\n",
ar->debug.pktlog_filter, ret);
} else {
ret = ath10k_wmi_pdev_pktlog_disable(ar);
if (ret)
/* not serious */
ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
}
return ret;
}
void ath10k_debug_stop(struct ath10k *ar)
......@@ -1072,6 +1427,8 @@ void ath10k_debug_stop(struct ath10k *ar)
ar->debug.htt_max_amsdu = 0;
ar->debug.htt_max_ampdu = 0;
ath10k_wmi_pdev_pktlog_disable(ar);
}
static ssize_t ath10k_write_simulate_radar(struct file *file,
......@@ -1154,12 +1511,78 @@ static const struct file_operations fops_dfs_stats = {
.llseek = default_llseek,
};
static ssize_t ath10k_write_pktlog_filter(struct file *file,
const char __user *ubuf,
size_t count, loff_t *ppos)
{
struct ath10k *ar = file->private_data;
u32 filter;
int ret;
if (kstrtouint_from_user(ubuf, count, 0, &filter))
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if (ar->state != ATH10K_STATE_ON) {
ar->debug.pktlog_filter = filter;
ret = count;
goto out;
}
if (filter && (filter != ar->debug.pktlog_filter)) {
ret = ath10k_wmi_pdev_pktlog_enable(ar, filter);
if (ret) {
ath10k_warn(ar, "failed to enable pktlog filter %x: %d\n",
ar->debug.pktlog_filter, ret);
goto out;
}
} else {
ret = ath10k_wmi_pdev_pktlog_disable(ar);
if (ret) {
ath10k_warn(ar, "failed to disable pktlog: %d\n", ret);
goto out;
}
}
ar->debug.pktlog_filter = filter;
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static ssize_t ath10k_read_pktlog_filter(struct file *file, char __user *ubuf,
size_t count, loff_t *ppos)
{
char buf[32];
struct ath10k *ar = file->private_data;
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "%08x\n",
ar->debug.pktlog_filter);
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(ubuf, count, ppos, buf, len);
}
static const struct file_operations fops_pktlog_filter = {
.read = ath10k_read_pktlog_filter,
.write = ath10k_write_pktlog_filter,
.open = simple_open
};
int ath10k_debug_create(struct ath10k *ar)
{
ar->debug.fw_crash_data = vzalloc(sizeof(*ar->debug.fw_crash_data));
if (!ar->debug.fw_crash_data)
return -ENOMEM;
INIT_LIST_HEAD(&ar->debug.fw_stats.pdevs);
INIT_LIST_HEAD(&ar->debug.fw_stats.peers);
return 0;
}
......@@ -1167,6 +1590,8 @@ void ath10k_debug_destroy(struct ath10k *ar)
{
vfree(ar->debug.fw_crash_data);
ar->debug.fw_crash_data = NULL;
ath10k_debug_fw_stats_reset(ar);
}
int ath10k_debug_register(struct ath10k *ar)
......@@ -1183,11 +1608,14 @@ int ath10k_debug_register(struct ath10k *ar)
INIT_DELAYED_WORK(&ar->debug.htt_stats_dwork,
ath10k_debug_htt_stats_dwork);
init_completion(&ar->debug.event_stats_compl);
init_completion(&ar->debug.fw_stats_complete);
debugfs_create_file("fw_stats", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_fw_stats);
debugfs_create_file("fw_reset_stats", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_reset_stats);
debugfs_create_file("wmi_services", S_IRUSR, ar->debug.debugfs_phy, ar,
&fops_wmi_services);
......@@ -1210,6 +1638,9 @@ int ath10k_debug_register(struct ath10k *ar)
debugfs_create_file("fw_dbglog", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_fw_dbglog);
debugfs_create_file("cal_data", S_IRUSR, ar->debug.debugfs_phy,
ar, &fops_cal_data);
if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) {
debugfs_create_file("dfs_simulate_radar", S_IWUSR,
ar->debug.debugfs_phy, ar,
......@@ -1224,6 +1655,9 @@ int ath10k_debug_register(struct ath10k *ar)
&fops_dfs_stats);
}
debugfs_create_file("pktlog_filter", S_IRUGO | S_IWUSR,
ar->debug.debugfs_phy, ar, &fops_pktlog_filter);
return 0;
}
......@@ -1260,11 +1694,26 @@ void ath10k_dbg_dump(struct ath10k *ar,
const char *msg, const char *prefix,
const void *buf, size_t len)
{
char linebuf[256];
unsigned int linebuflen;
const void *ptr;
if (ath10k_debug_mask & mask) {
if (msg)
ath10k_dbg(ar, mask, "%s\n", msg);
print_hex_dump_bytes(prefix, DUMP_PREFIX_OFFSET, buf, len);
for (ptr = buf; (ptr - buf) < len; ptr += 16) {
linebuflen = 0;
linebuflen += scnprintf(linebuf + linebuflen,
sizeof(linebuf) - linebuflen,
"%s%08x: ",
(prefix ? prefix : ""),
(unsigned int)(ptr - buf));
hex_dump_to_buffer(ptr, len - (ptr - buf), 16, 1,
linebuf + linebuflen,
sizeof(linebuf) - linebuflen, true);
dev_printk(KERN_DEBUG, ar->dev, "%s\n", linebuf);
}
}
/* tracing code doesn't like null strings :/ */
......
......@@ -38,11 +38,20 @@ enum ath10k_debug_mask {
ATH10K_DBG_ANY = 0xffffffff,
};
enum ath10k_pktlog_filter {
ATH10K_PKTLOG_RX = 0x000000001,
ATH10K_PKTLOG_TX = 0x000000002,
ATH10K_PKTLOG_RCFIND = 0x000000004,
ATH10K_PKTLOG_RCUPDATE = 0x000000008,
ATH10K_PKTLOG_DBG_PRINT = 0x000000010,
ATH10K_PKTLOG_ANY = 0x00000001f,
};
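As an aside, a hedged illustration of composing a mask for the new pktlog_filter debugfs file; the bit values mirror the enum above, and since the write handler parses the input with kstrtouint_from_user(..., 0, ...), both decimal and hex values work:

/* Illustration only; the defines mirror enum ath10k_pktlog_filter above. */
#include <stdio.h>

#define PKTLOG_RX        0x01
#define PKTLOG_TX        0x02
#define PKTLOG_RCFIND    0x04
#define PKTLOG_RCUPDATE  0x08
#define PKTLOG_DBG_PRINT 0x10

int main(void)
{
	unsigned int filter = PKTLOG_RX | PKTLOG_TX;

	/* This is the value one would write to the pktlog_filter file,
	 * e.g. "echo 0x3 > .../ath10k/pktlog_filter" (path assumed). */
	printf("pktlog filter mask: 0x%x\n", filter);
	return 0;
}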
extern unsigned int ath10k_debug_mask;
__printf(2, 3) int ath10k_info(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) int ath10k_err(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) int ath10k_warn(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) void ath10k_err(struct ath10k *ar, const char *fmt, ...);
__printf(2, 3) void ath10k_warn(struct ath10k *ar, const char *fmt, ...);
void ath10k_print_driver_info(struct ath10k *ar);
#ifdef CONFIG_ATH10K_DEBUGFS
......@@ -53,17 +62,24 @@ void ath10k_debug_destroy(struct ath10k *ar);
int ath10k_debug_register(struct ath10k *ar);
void ath10k_debug_unregister(struct ath10k *ar);
void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
const void *service_map,
size_t map_size);
void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev);
void ath10k_debug_fw_stats_process(struct ath10k *ar, struct sk_buff *skb);
struct ath10k_fw_crash_data *
ath10k_debug_get_new_fw_crash_data(struct ath10k *ar);
void ath10k_debug_dbglog_add(struct ath10k *ar, u8 *buffer, int len);
#define ATH10K_DFS_STAT_INC(ar, c) (ar->debug.dfs_stats.c++)
void ath10k_debug_get_et_strings(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
u32 sset, u8 *data);
int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, int sset);
void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
struct ieee80211_vif *vif,
struct ethtool_stats *stats, u64 *data);
#else
static inline int ath10k_debug_start(struct ath10k *ar)
{
......@@ -93,13 +109,13 @@ static inline void ath10k_debug_unregister(struct ath10k *ar)
}
static inline void ath10k_debug_read_service_map(struct ath10k *ar,
void *service_map,
const void *service_map,
size_t map_size)
{
}
static inline void ath10k_debug_read_target_stats(struct ath10k *ar,
struct wmi_stats_event *ev)
static inline void ath10k_debug_fw_stats_process(struct ath10k *ar,
struct sk_buff *skb)
{
}
......@@ -116,6 +132,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
#define ATH10K_DFS_STAT_INC(ar, c) do { } while (0)
#define ath10k_debug_get_et_strings NULL
#define ath10k_debug_get_et_sset_count NULL
#define ath10k_debug_get_et_stats NULL
#endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG
......
......@@ -43,6 +43,10 @@ struct ath10k_hif_ops {
int (*tx_sg)(struct ath10k *ar, u8 pipe_id,
struct ath10k_hif_sg_item *items, int n_items);
/* read firmware memory through the diagnose interface */
int (*diag_read)(struct ath10k *ar, u32 address, void *buf,
size_t buf_len);
/*
* API to handle HIF-specific BMI message exchanges, this API is
* synchronous and only allowed to be called from a context that
......@@ -98,6 +102,12 @@ static inline int ath10k_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
return ar->hif.ops->tx_sg(ar, pipe_id, items, n_items);
}
static inline int ath10k_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
size_t buf_len)
{
return ar->hif.ops->diag_read(ar, address, buf, buf_len);
}
static inline int ath10k_hif_exchange_bmi_msg(struct ath10k *ar,
void *request, u32 request_len,
void *response, u32 *response_len)
......
......@@ -725,7 +725,7 @@ static inline u8 *htt_rx_test_get_chars(struct htt_rx_test *rx_test)
*/
struct htt_pktlog_msg {
u8 pad[3];
__le32 payload[1 /* or more */];
u8 payload[0];
} __packed;
struct htt_dbg_stats_rx_reorder_stats {
......
......@@ -291,6 +291,9 @@ static inline struct sk_buff *ath10k_htt_rx_netbuf_pop(struct ath10k_htt *htt)
htt->rx_ring.sw_rd_idx.msdu_payld = idx;
htt->rx_ring.fill_cnt--;
trace_ath10k_htt_rx_pop_msdu(ar, msdu->data, msdu->len +
skb_tailroom(msdu));
return msdu;
}
......@@ -316,6 +319,7 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
int msdu_len, msdu_chaining = 0;
struct sk_buff *msdu, *next;
struct htt_rx_desc *rx_desc;
u32 tsf;
lockdep_assert_held(&htt->rx_ring.lock);
......@@ -447,6 +451,9 @@ static int ath10k_htt_rx_amsdu_pop(struct ath10k_htt *htt,
last_msdu = __le32_to_cpu(rx_desc->msdu_end.info0) &
RX_MSDU_END_INFO0_LAST_MSDU;
tsf = __le32_to_cpu(rx_desc->ppdu_end.tsf_timestamp);
trace_ath10k_htt_rx_desc(ar, tsf, &rx_desc->attention,
sizeof(*rx_desc) - sizeof(u32));
if (last_msdu) {
msdu->next = NULL;
break;
......@@ -1674,6 +1681,15 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
case HTT_T2H_MSG_TYPE_RX_DELBA:
ath10k_htt_rx_delba(ar, resp);
break;
case HTT_T2H_MSG_TYPE_PKTLOG: {
struct ath10k_pktlog_hdr *hdr =
(struct ath10k_pktlog_hdr *)resp->pktlog_msg.payload;
trace_ath10k_htt_pktlog(ar, resp->pktlog_msg.payload,
sizeof(*hdr) +
__le16_to_cpu(hdr->size));
break;
}
case HTT_T2H_MSG_TYPE_RX_FLUSH: {
/* Ignore this event because mac80211 takes care of Rx
* aggregation reordering.
......@@ -1681,8 +1697,8 @@ void ath10k_htt_t2h_msg_handler(struct ath10k *ar, struct sk_buff *skb)
break;
}
default:
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt event (%d) not handled\n",
resp->hdr.msg_type);
ath10k_warn(ar, "htt event (%d) not handled\n",
resp->hdr.msg_type);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt event: ",
skb->data, skb->len);
break;
......
......@@ -557,12 +557,14 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le32(HTT_INVALID_PEERID);
trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
ath10k_dbg(ar, ATH10K_DBG_HTT,
"htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu\n",
flags0, flags1, msdu->len, msdu_id, frags_paddr,
(u32)skb_cb->paddr, vdev_id, tid);
ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
msdu->data, msdu->len);
trace_ath10k_htt_tx_msdu(ar, msdu->data, msdu->len);
sg_items[0].transfer_id = 0;
sg_items[0].transfer_context = NULL;
......
......@@ -20,15 +20,16 @@
#include "targaddrs.h"
#define ATH10K_FW_DIR "ath10k"
/* QCA988X 1.0 definitions (unsupported) */
#define QCA988X_HW_1_0_CHIP_ID_REV 0x0
/* QCA988X 2.0 definitions */
#define QCA988X_HW_2_0_VERSION 0x4100016c
#define QCA988X_HW_2_0_CHIP_ID_REV 0x2
#define QCA988X_HW_2_0_FW_DIR "ath10k/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_DIR ATH10K_FW_DIR "/QCA988X/hw2.0"
#define QCA988X_HW_2_0_FW_FILE "firmware.bin"
#define QCA988X_HW_2_0_FW_3_FILE "firmware-3.bin"
#define QCA988X_HW_2_0_OTP_FILE "otp.bin"
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
......@@ -43,6 +44,8 @@
#define REG_DUMP_COUNT_QCA988X 60
#define QCA988X_CAL_DATA_LEN 2116
struct ath10k_fw_ie {
__le32 id;
__le32 len;
......@@ -78,6 +81,15 @@ enum ath10k_mcast2ucast_mode {
ATH10K_MCAST2UCAST_ENABLED = 1,
};
struct ath10k_pktlog_hdr {
__le16 flags;
__le16 missed_cnt;
__le16 log_type;
__le16 size;
__le32 timestamp;
u8 payload[0];
} __packed;
/* Target specific defines for MAIN firmware */
#define TARGET_NUM_VDEVS 8
#define TARGET_NUM_PEER_AST 2
......@@ -279,6 +291,7 @@ enum ath10k_mcast2ucast_mode {
#define SI_RX_DATA1_OFFSET 0x00000014
#define CORE_CTRL_CPU_INTR_MASK 0x00002000
#define CORE_CTRL_PCIE_REG_31_MASK 0x00000800
#define CORE_CTRL_ADDRESS 0x0000
#define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c
......
......@@ -479,6 +479,40 @@ static void ath10k_peer_cleanup_all(struct ath10k *ar)
/* Interface management */
/************************/
void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->data_lock);
if (!arvif->beacon)
return;
if (!arvif->beacon_buf)
dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
arvif->beacon_sent = false;
}
static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
{
struct ath10k *ar = arvif->ar;
lockdep_assert_held(&ar->data_lock);
ath10k_mac_vif_beacon_free(arvif);
if (arvif->beacon_buf) {
dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
arvif->beacon_buf, arvif->beacon_paddr);
arvif->beacon_buf = NULL;
}
}
static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
int ret;
......@@ -590,9 +624,9 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar)
return -ENOMEM;
}
bit = ffs(ar->free_vdev_map);
bit = __ffs64(ar->free_vdev_map);
ar->monitor_vdev_id = bit - 1;
ar->monitor_vdev_id = bit;
ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
WMI_VDEV_TYPE_MONITOR,
......@@ -603,7 +637,7 @@ static int ath10k_monitor_vdev_create(struct ath10k *ar)
return ret;
}
ar->free_vdev_map &= ~(1 << ar->monitor_vdev_id);
ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
ar->monitor_vdev_id);
......@@ -623,7 +657,7 @@ static int ath10k_monitor_vdev_delete(struct ath10k *ar)
return ret;
}
ar->free_vdev_map |= 1 << ar->monitor_vdev_id;
ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
ar->monitor_vdev_id);
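The hunks above are part of a wider change that grows the free vdev bitmap to 64 bits: ffs() (1-based, int) becomes __ffs64() (0-based, u64), so the "- 1" adjustment disappears, and the plain "1 <<" shifts become "1LL <<" so the mask is computed in 64-bit arithmetic. A minimal userspace sketch of why the 64-bit constant matters (identifiers below are illustrative, not taken from the driver):

/* --- illustrative sketch, not part of the commit --- */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        uint64_t free_vdev_map = ~0ULL; /* all vdev ids free */
        unsigned int id = 40;           /* only reachable with a 64-bit map */

        /* "1 << id" would be evaluated as a 32-bit shift and is undefined
         * for id >= 32; the 64-bit constant keeps the whole expression in
         * u64, matching __ffs64(), which returns a 0-based bit index.
         */
        free_vdev_map &= ~(1ULL << id);         /* claim the vdev id */
        printf("map after claiming vdev %u: 0x%016llx\n",
               id, (unsigned long long)free_vdev_map);

        free_vdev_map |= 1ULL << id;            /* release it again */
        return 0;
}
/* --- end sketch --- */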
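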
......@@ -909,15 +943,7 @@ static void ath10k_control_beaconing(struct ath10k_vif *arvif,
arvif->is_up = false;
spin_lock_bh(&arvif->ar->data_lock);
if (arvif->beacon) {
dma_unmap_single(arvif->ar->dev,
ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
arvif->beacon_sent = false;
}
ath10k_mac_vif_beacon_free(arvif);
spin_unlock_bh(&arvif->ar->data_lock);
return;
......@@ -966,14 +992,6 @@ static void ath10k_control_ibss(struct ath10k_vif *arvif,
if (is_zero_ether_addr(arvif->bssid))
return;
ret = ath10k_peer_delete(arvif->ar, arvif->vdev_id,
arvif->bssid);
if (ret) {
ath10k_warn(ar, "failed to delete IBSS BSSID peer %pM for vdev %d: %d\n",
arvif->bssid, arvif->vdev_id, ret);
return;
}
memset(arvif->bssid, 0, ETH_ALEN);
return;
......@@ -1042,51 +1060,45 @@ static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
/* Station management */
/**********************/
static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
struct ieee80211_vif *vif)
{
/* Some firmware revisions have unstable STA powersave when listen
* interval is set too high (e.g. 5). The symptom is that the firmware
* doesn't generate NullFunc frames properly even if buffered frames have
* been indicated in the Beacon TIM. The firmware seldom wakes up to pull
* buffered frames; often pinging the device from the AP simply fails.
*
* As a workaround, set it to 1.
*/
if (vif->type == NL80211_IFTYPE_STATION)
return 1;
return ar->hw->conf.listen_interval;
}
static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
struct ath10k_vif *arvif,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
lockdep_assert_held(&ar->conf_mutex);
ether_addr_copy(arg->addr, sta->addr);
arg->vdev_id = arvif->vdev_id;
arg->peer_aid = sta->aid;
arg->peer_flags |= WMI_PEER_AUTH;
if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
/*
* Seems FW has problems with Power Save in STA
* mode when we set this parameter too high (e.g. 5).
* Often we see that FW doesn't send NULL (with clean P flags)
* frames even when there is info about buffered frames in beacons.
* Sometimes we have to wait more than 10 seconds before FW
* wakes up. Often sending one ping from the AP to our device
* simply fails (more than 50%).
*
* Seems setting this FW parameter to 1 causes FW to
* check every beacon and wake up immediately after
* detecting buffered data.
*/
arg->peer_listen_intval = 1;
else
arg->peer_listen_intval = ar->hw->conf.listen_interval;
arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
arg->peer_num_spatial_streams = 1;
/*
* The assoc capabilities are available only in managed mode.
*/
if (arvif->vdev_type == WMI_VDEV_TYPE_STA && bss_conf)
arg->peer_caps = bss_conf->assoc_capability;
arg->peer_caps = vif->bss_conf.assoc_capability;
}
static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
struct ath10k_vif *arvif,
struct ieee80211_vif *vif,
struct wmi_peer_assoc_complete_arg *arg)
{
struct ieee80211_vif *vif = arvif->vif;
struct ieee80211_bss_conf *info = &vif->bss_conf;
struct cfg80211_bss *bss;
const u8 *rsnie = NULL;
......@@ -1343,11 +1355,12 @@ static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
}
static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
struct ath10k_vif *arvif,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
switch (arvif->vdev_type) {
case WMI_VDEV_TYPE_AP:
if (sta->wme)
......@@ -1359,7 +1372,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
break;
case WMI_VDEV_TYPE_STA:
if (bss_conf->qos)
if (vif->bss_conf.qos)
arg->peer_flags |= WMI_PEER_QOS;
break;
default:
......@@ -1368,7 +1381,7 @@ static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
}
static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
struct ath10k_vif *arvif,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct wmi_peer_assoc_complete_arg *arg)
{
......@@ -1419,22 +1432,21 @@ static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
}
static int ath10k_peer_assoc_prepare(struct ath10k *ar,
struct ath10k_vif *arvif,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
struct ieee80211_bss_conf *bss_conf,
struct wmi_peer_assoc_complete_arg *arg)
{
lockdep_assert_held(&ar->conf_mutex);
memset(arg, 0, sizeof(*arg));
ath10k_peer_assoc_h_basic(ar, arvif, sta, bss_conf, arg);
ath10k_peer_assoc_h_crypto(ar, arvif, arg);
ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
ath10k_peer_assoc_h_crypto(ar, vif, arg);
ath10k_peer_assoc_h_rates(ar, sta, arg);
ath10k_peer_assoc_h_ht(ar, sta, arg);
ath10k_peer_assoc_h_vht(ar, sta, arg);
ath10k_peer_assoc_h_qos(ar, arvif, sta, bss_conf, arg);
ath10k_peer_assoc_h_phymode(ar, arvif, sta, arg);
ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
return 0;
}
......@@ -1480,6 +1492,9 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
lockdep_assert_held(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
arvif->vdev_id, arvif->bssid, arvif->aid);
rcu_read_lock();
ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
......@@ -1494,8 +1509,7 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
* before calling ath10k_setup_peer_smps() which might sleep. */
ht_cap = ap_sta->ht_cap;
ret = ath10k_peer_assoc_prepare(ar, arvif, ap_sta,
bss_conf, &peer_arg);
ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
bss_conf->bssid, arvif->vdev_id, ret);
......@@ -1523,6 +1537,8 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
"mac vdev %d up (associated) bssid %pM aid %d\n",
arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
WARN_ON(arvif->is_up);
arvif->aid = bss_conf->aid;
ether_addr_copy(arvif->bssid, bss_conf->bssid);
......@@ -1536,9 +1552,6 @@ static void ath10k_bss_assoc(struct ieee80211_hw *hw,
arvif->is_up = true;
}
/*
* FIXME: flush TIDs
*/
static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
struct ieee80211_vif *vif)
{
......@@ -1548,45 +1561,30 @@ static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
lockdep_assert_held(&ar->conf_mutex);
/*
* For some reason, calling VDEV-DOWN before VDEV-STOP
* makes the FW send frames via HTT after disassociation.
* No idea why this happens, even though VDEV-DOWN is supposed
* to be analogous to link down, so just stop the VDEV.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d stop (disassociated\n",
arvif->vdev_id);
/* FIXME: check return value */
ret = ath10k_vdev_stop(arvif);
/*
* If we don't call VDEV-DOWN after VDEV-STOP, FW will remain active and
* report beacons from the previously associated network through HTT.
* This in turn would spam mac80211 WARN_ON if we bring down all
* interfaces as it expects there is no rx when no interface is
* running.
*/
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d down\n", arvif->vdev_id);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
arvif->vdev_id, arvif->bssid);
/* FIXME: why don't we print error if wmi call fails? */
ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
if (ret)
ath10k_warn(ar, "faield to down vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->def_wep_key_idx = 0;
arvif->is_started = false;
arvif->is_up = false;
}
static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
struct ieee80211_sta *sta, bool reassoc)
static int ath10k_station_assoc(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta,
bool reassoc)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
struct wmi_peer_assoc_complete_arg peer_arg;
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
ret = ath10k_peer_assoc_prepare(ar, arvif, sta, NULL, &peer_arg);
ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
if (ret) {
ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
......@@ -1601,43 +1599,51 @@ static int ath10k_station_assoc(struct ath10k *ar, struct ath10k_vif *arvif,
return ret;
}
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, &sta->ht_cap);
if (ret) {
ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
if (!sta->wme && !reassoc) {
arvif->num_legacy_stations++;
ret = ath10k_recalc_rtscts_prot(arvif);
/* Re-assoc is run only to update supported rates for a given station. It
* doesn't make much sense to reconfigure the peer completely.
*/
if (!reassoc) {
ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
&sta->ht_cap);
if (ret) {
ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
}
ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
if (ret) {
ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
sta->addr, arvif->vdev_id, ret);
return ret;
if (!sta->wme) {
arvif->num_legacy_stations++;
ret = ath10k_recalc_rtscts_prot(arvif);
if (ret) {
ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
if (ret) {
ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
arvif->vdev_id, ret);
return ret;
}
}
return ret;
}
static int ath10k_station_disassoc(struct ath10k *ar, struct ath10k_vif *arvif,
static int ath10k_station_disassoc(struct ath10k *ar,
struct ieee80211_vif *vif,
struct ieee80211_sta *sta)
{
struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
int ret = 0;
lockdep_assert_held(&ar->conf_mutex);
......@@ -1729,6 +1735,7 @@ static int ath10k_update_channel_list(struct ath10k *ar)
ch->passive = passive;
ch->freq = channel->center_freq;
ch->band_center_freq1 = channel->center_freq;
ch->min_power = 0;
ch->max_power = channel->max_power * 2;
ch->max_reg_power = channel->max_reg_power * 2;
......@@ -2376,16 +2383,8 @@ void ath10k_halt(struct ath10k *ar)
ath10k_hif_power_down(ar);
spin_lock_bh(&ar->data_lock);
list_for_each_entry(arvif, &ar->arvifs, list) {
if (!arvif->beacon)
continue;
dma_unmap_single(arvif->ar->dev,
ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
}
list_for_each_entry(arvif, &ar->arvifs, list)
ath10k_mac_vif_beacon_cleanup(arvif);
spin_unlock_bh(&ar->data_lock);
}
......@@ -2677,12 +2676,68 @@ static void ath10k_config_chan(struct ath10k *ar)
ath10k_monitor_recalc(ar);
}
static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
{
int ret;
u32 param;
lockdep_assert_held(&ar->conf_mutex);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
param = ar->wmi.pdev_param->txpower_limit2g;
ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
if (ret) {
ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
txpower, ret);
return ret;
}
param = ar->wmi.pdev_param->txpower_limit5g;
ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
if (ret) {
ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
txpower, ret);
return ret;
}
return 0;
}
static int ath10k_mac_txpower_recalc(struct ath10k *ar)
{
struct ath10k_vif *arvif;
int ret, txpower = -1;
lockdep_assert_held(&ar->conf_mutex);
list_for_each_entry(arvif, &ar->arvifs, list) {
WARN_ON(arvif->txpower < 0);
if (txpower == -1)
txpower = arvif->txpower;
else
txpower = min(txpower, arvif->txpower);
}
if (WARN_ON(txpower == -1))
return -EINVAL;
ret = ath10k_mac_txpower_setup(ar, txpower);
if (ret) {
ath10k_warn(ar, "failed to setup tx power %d: %d\n",
txpower, ret);
return ret;
}
return 0;
}
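The new ath10k_mac_txpower_recalc() above reduces the per-vif txpower values to their minimum and pushes that single limit to firmware through the 2g and 5g pdev parameters; the "* 2" in ath10k_mac_txpower_setup() suggests the firmware takes the limit in half-dBm steps (an assumption, not stated in the patch). A small standalone sketch of that reduction and scaling:

/* --- illustrative sketch, not part of the commit --- */
#include <stdio.h>

/* Hypothetical per-vif powers in dBm; -1 stands for "unset". */
static const int vif_txpower[] = { 20, 17, 23 };

int main(void)
{
        int i, txpower = -1;

        for (i = 0; i < 3; i++) {
                if (txpower == -1)
                        txpower = vif_txpower[i];
                else if (vif_txpower[i] < txpower)
                        txpower = vif_txpower[i];
        }

        /* Assumed: the WMI pdev txpower limit is expressed in 0.5 dBm
         * units, hence the doubling before pdev_set_param.
         */
        printf("pdev txpower limit: %d dBm -> %d half-dBm\n",
               txpower, txpower * 2);
        return 0;
}
/* --- end sketch --- */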
static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
{
struct ath10k *ar = hw->priv;
struct ieee80211_conf *conf = &hw->conf;
int ret = 0;
u32 param;
mutex_lock(&ar->conf_mutex);
......@@ -2706,25 +2761,6 @@ static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
}
}
if (changed & IEEE80211_CONF_CHANGE_POWER) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac config power %d\n",
hw->conf.power_level);
param = ar->wmi.pdev_param->txpower_limit2g;
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
hw->conf.power_level, ret);
param = ar->wmi.pdev_param->txpower_limit5g;
ret = ath10k_wmi_pdev_set_param(ar, param,
hw->conf.power_level * 2);
if (ret)
ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
hw->conf.power_level, ret);
}
if (changed & IEEE80211_CONF_CHANGE_PS)
ath10k_config_ps(ar);
......@@ -2772,9 +2808,12 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
ret = -EBUSY;
goto err;
}
bit = ffs(ar->free_vdev_map);
bit = __ffs64(ar->free_vdev_map);
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
bit, ar->free_vdev_map);
arvif->vdev_id = bit - 1;
arvif->vdev_id = bit;
arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE;
if (ar->p2p)
......@@ -2804,8 +2843,39 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
break;
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype);
/* Some firmware revisions don't wait for beacon tx completion before
* sending another SWBA event. This could lead to hardware using old
* (freed) beacon data in some cases, e.g. tx credit starvation
* combined with a missed TBTT. This is very rare.
*
* On non-IOMMU-enabled hosts this could be a security issue because the
* hw could beacon random data over the air. On IOMMU-enabled hosts DMAR
* faults would occur in most cases and the target device would crash.
*
* Since no beacon tx completions (implicit or explicit) are propagated
* to the host, the only workaround is to allocate a DMA-coherent buffer
* for the lifetime of a vif and use it for all beacon tx commands. The
* worst case for this approach is that some beacons may become
* corrupted, e.g. have garbled IEs or an out-of-date TIM bitmap.
*/
if (vif->type == NL80211_IFTYPE_ADHOC ||
vif->type == NL80211_IFTYPE_AP) {
arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
IEEE80211_MAX_FRAME_LEN,
&arvif->beacon_paddr,
GFP_ATOMIC);
if (!arvif->beacon_buf) {
ret = -ENOMEM;
ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
ret);
goto err;
}
}
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
arvif->beacon_buf ? "single-buf" : "per-skb");
ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
arvif->vdev_subtype, vif->addr);
......@@ -2815,7 +2885,7 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err;
}
ar->free_vdev_map &= ~(1 << arvif->vdev_id);
ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
list_add(&arvif->list, &ar->arvifs);
vdev_param = ar->wmi.vdev_param->def_keyid;
......@@ -2899,6 +2969,13 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
goto err_peer_delete;
}
arvif->txpower = vif->bss_conf.txpower;
ret = ath10k_mac_txpower_recalc(ar);
if (ret) {
ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
goto err_peer_delete;
}
mutex_unlock(&ar->conf_mutex);
return 0;
......@@ -2908,10 +2985,16 @@ static int ath10k_add_interface(struct ieee80211_hw *hw,
err_vdev_delete:
ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
ar->free_vdev_map |= 1 << arvif->vdev_id;
ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
err:
if (arvif->beacon_buf) {
dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
arvif->beacon_buf, arvif->beacon_paddr);
arvif->beacon_buf = NULL;
}
mutex_unlock(&ar->conf_mutex);
return ret;
......@@ -2929,14 +3012,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
cancel_work_sync(&arvif->wep_key_work);
spin_lock_bh(&ar->data_lock);
if (arvif->beacon) {
dma_unmap_single(arvif->ar->dev,
ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
}
ath10k_mac_vif_beacon_cleanup(arvif);
spin_unlock_bh(&ar->data_lock);
ret = ath10k_spectral_vif_stop(arvif);
......@@ -2944,7 +3020,7 @@ static void ath10k_remove_interface(struct ieee80211_hw *hw,
ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
arvif->vdev_id, ret);
ar->free_vdev_map |= 1 << arvif->vdev_id;
ar->free_vdev_map |= 1LL << arvif->vdev_id;
list_del(&arvif->list);
if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
......@@ -3068,54 +3144,8 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
arvif->u.ap.hidden_ssid = info->hidden_ssid;
}
/*
* Firmware manages AP self-peer internally so make sure to not create
* it in the driver. Otherwise AP self-peer deletion may time out later.
*/
if (changed & BSS_CHANGED_BSSID &&
vif->type != NL80211_IFTYPE_AP) {
if (!is_zero_ether_addr(info->bssid)) {
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d create peer %pM\n",
arvif->vdev_id, info->bssid);
ret = ath10k_peer_create(ar, arvif->vdev_id,
info->bssid);
if (ret)
ath10k_warn(ar, "failed to add peer %pM for vdev %d when changing bssid: %i\n",
info->bssid, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
/*
* this is never erased as we use it for crypto key
* clearing; this is a FW requirement
*/
ether_addr_copy(arvif->bssid, info->bssid);
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d start %pM\n",
arvif->vdev_id, info->bssid);
ret = ath10k_vdev_start(arvif);
if (ret) {
ath10k_warn(ar, "failed to start vdev %i: %d\n",
arvif->vdev_id, ret);
goto exit;
}
arvif->is_started = true;
}
/*
* Mac80211 does not keep IBSS bssid when leaving IBSS,
* so the driver needs to store it. It is needed when leaving
* IBSS in order to remove BSSID peer.
*/
if (vif->type == NL80211_IFTYPE_ADHOC)
memcpy(arvif->bssid, info->bssid,
ETH_ALEN);
}
}
if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
ether_addr_copy(arvif->bssid, info->bssid);
if (changed & BSS_CHANGED_BEACON_ENABLED)
ath10k_control_beaconing(arvif, info);
......@@ -3177,10 +3207,21 @@ static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
ath10k_monitor_stop(ar);
ath10k_bss_assoc(hw, vif, info);
ath10k_monitor_recalc(ar);
} else {
ath10k_bss_disassoc(hw, vif);
}
}
exit:
if (changed & BSS_CHANGED_TXPOWER) {
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
arvif->vdev_id, info->txpower);
arvif->txpower = info->txpower;
ret = ath10k_mac_txpower_recalc(ar);
if (ret)
ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
}
mutex_unlock(&ar->conf_mutex);
}
......@@ -3453,7 +3494,7 @@ static void ath10k_sta_rc_update_wk(struct work_struct *wk)
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n",
sta->addr);
err = ath10k_station_assoc(ar, arvif, sta, true);
err = ath10k_station_assoc(ar, arvif->vif, sta, true);
if (err)
ath10k_warn(ar, "failed to reassociate station: %pM\n",
sta->addr);
......@@ -3489,8 +3530,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
mutex_lock(&ar->conf_mutex);
if (old_state == IEEE80211_STA_NOTEXIST &&
new_state == IEEE80211_STA_NONE &&
vif->type != NL80211_IFTYPE_STATION) {
new_state == IEEE80211_STA_NONE) {
/*
* New station addition.
*/
......@@ -3514,6 +3554,21 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
if (ret)
ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION) {
WARN_ON(arvif->is_started);
ret = ath10k_vdev_start(arvif);
if (ret) {
ath10k_warn(ar, "failed to start vdev %i: %d\n",
arvif->vdev_id, ret);
WARN_ON(ath10k_peer_delete(ar, arvif->vdev_id,
sta->addr));
goto exit;
}
arvif->is_started = true;
}
} else if ((old_state == IEEE80211_STA_NONE &&
new_state == IEEE80211_STA_NOTEXIST)) {
/*
......@@ -3522,13 +3577,23 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC,
"mac vdev %d peer delete %pM (sta gone)\n",
arvif->vdev_id, sta->addr);
if (vif->type == NL80211_IFTYPE_STATION) {
WARN_ON(!arvif->is_started);
ret = ath10k_vdev_stop(arvif);
if (ret)
ath10k_warn(ar, "failed to stop vdev %i: %d\n",
arvif->vdev_id, ret);
arvif->is_started = false;
}
ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
if (ret)
ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
sta->addr, arvif->vdev_id, ret);
if (vif->type == NL80211_IFTYPE_STATION)
ath10k_bss_disassoc(hw, vif);
} else if (old_state == IEEE80211_STA_AUTH &&
new_state == IEEE80211_STA_ASSOC &&
(vif->type == NL80211_IFTYPE_AP ||
......@@ -3539,7 +3604,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
sta->addr);
ret = ath10k_station_assoc(ar, arvif, sta, false);
ret = ath10k_station_assoc(ar, vif, sta, false);
if (ret)
ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
......@@ -3553,7 +3618,7 @@ static int ath10k_sta_state(struct ieee80211_hw *hw,
ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
sta->addr);
ret = ath10k_station_disassoc(ar, arvif, sta);
ret = ath10k_station_disassoc(ar, vif, sta);
if (ret)
ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
sta->addr, arvif->vdev_id, ret);
......@@ -4456,6 +4521,9 @@ static const struct ieee80211_ops ath10k_ops = {
.sta_rc_update = ath10k_sta_rc_update,
.get_tsf = ath10k_get_tsf,
.ampdu_action = ath10k_ampdu_action,
.get_et_sset_count = ath10k_debug_get_et_sset_count,
.get_et_stats = ath10k_debug_get_et_stats,
.get_et_strings = ath10k_debug_get_et_strings,
CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
......@@ -4800,15 +4868,6 @@ int ath10k_mac_register(struct ath10k *ar)
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP);
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
/* TODO: Have to deal with 2x2 chips if/when the come out. */
ar->supp_tx_chainmask = TARGET_10X_TX_CHAIN_MASK;
ar->supp_rx_chainmask = TARGET_10X_RX_CHAIN_MASK;
} else {
ar->supp_tx_chainmask = TARGET_TX_CHAIN_MASK;
ar->supp_rx_chainmask = TARGET_RX_CHAIN_MASK;
}
ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
......
......@@ -39,6 +39,7 @@ void ath10k_offchan_tx_work(struct work_struct *work);
void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar);
void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work);
void ath10k_halt(struct ath10k *ar);
void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif);
static inline struct ath10k_vif *ath10k_vif_to_arvif(struct ieee80211_vif *vif)
{
......
......@@ -485,6 +485,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
void *data_buf = NULL;
int i;
spin_lock_bh(&ar_pci->ce_lock);
ce_diag = ar_pci->ce_diag;
/*
......@@ -511,7 +513,7 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
nbytes = min_t(unsigned int, remaining_bytes,
DIAG_TRANSFER_LIMIT);
ret = ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, ce_data);
if (ret != 0)
goto done;
......@@ -527,15 +529,15 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
address = TARG_CPU_SPACE_TO_CE_SPACE(ar, ar_pci->mem,
address);
ret = ath10k_ce_send(ce_diag, NULL, (u32)address, nbytes, 0,
0);
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)address, nbytes, 0,
0);
if (ret)
goto done;
i = 0;
while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
&completed_nbytes,
&id) != 0) {
while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
&completed_nbytes,
&id) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
ret = -EBUSY;
......@@ -554,9 +556,9 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
i = 0;
while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
&completed_nbytes,
&id, &flags) != 0) {
while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
&completed_nbytes,
&id, &flags) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
......@@ -591,6 +593,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
dma_free_coherent(ar->dev, orig_nbytes, data_buf,
ce_data_base);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
}
......@@ -648,6 +652,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
dma_addr_t ce_data_base = 0;
int i;
spin_lock_bh(&ar_pci->ce_lock);
ce_diag = ar_pci->ce_diag;
/*
......@@ -688,7 +694,7 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
nbytes = min_t(int, remaining_bytes, DIAG_TRANSFER_LIMIT);
/* Set up to receive directly into Target(!) address */
ret = ath10k_ce_rx_post_buf(ce_diag, NULL, address);
ret = __ath10k_ce_rx_post_buf(ce_diag, NULL, address);
if (ret != 0)
goto done;
......@@ -696,15 +702,15 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
* Request CE to send caller-supplied data that
* was copied to bounce buffer to Target(!) address.
*/
ret = ath10k_ce_send(ce_diag, NULL, (u32)ce_data,
nbytes, 0, 0);
ret = ath10k_ce_send_nolock(ce_diag, NULL, (u32)ce_data,
nbytes, 0, 0);
if (ret != 0)
goto done;
i = 0;
while (ath10k_ce_completed_send_next(ce_diag, NULL, &buf,
&completed_nbytes,
&id) != 0) {
while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
&completed_nbytes,
&id) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
......@@ -724,9 +730,9 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
i = 0;
while (ath10k_ce_completed_recv_next(ce_diag, NULL, &buf,
&completed_nbytes,
&id, &flags) != 0) {
while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
&completed_nbytes,
&id, &flags) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
......@@ -760,6 +766,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
ath10k_warn(ar, "failed to write diag value at 0x%x: %d\n",
address, ret);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
}
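The diag read/write hunks above wrap the whole multi-step CE transfer in ce_lock and switch each helper to its _nolock variant, so a single lock scope covers the buffer post, the send, and both completion polls. A minimal userspace analogy of that "locked wrapper around _nolock workers" pattern, with a pthread mutex standing in for the spinlock and all names purely illustrative:

/* --- illustrative sketch, not part of the commit --- */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t ce_lock = PTHREAD_MUTEX_INITIALIZER;
static int ring_fill;

/* The _nolock workers assume the caller already holds ce_lock. */
static int post_buf_nolock(void)
{
        ring_fill++;
        return 0;
}

static int send_nolock(void)
{
        return 0;
}

static int completed_nolock(void)
{
        if (!ring_fill)
                return -1;      /* nothing completed yet */
        ring_fill--;
        return 0;
}

static int diag_transfer(void)
{
        int ret;

        pthread_mutex_lock(&ce_lock);   /* one scope for the whole sequence */

        ret = post_buf_nolock();
        if (!ret)
                ret = send_nolock();

        /* The real code polls with mdelay() and a timeout; here the fake
         * completion arrives immediately.
         */
        while (!ret && completed_nolock() == 0)
                ;

        pthread_mutex_unlock(&ce_lock);
        return ret;
}

int main(void)
{
        printf("diag transfer: %d\n", diag_transfer());
        return 0;
}
/* --- end sketch --- */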
......@@ -861,6 +869,12 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
}
skb_put(skb, nbytes);
ath10k_dbg(ar, ATH10K_DBG_PCI, "pci rx ce pipe %d len %d\n",
ce_state->id, skb->len);
ath10k_dbg_dump(ar, ATH10K_DBG_PCI_DUMP, NULL, "pci rx: ",
skb->data, skb->len);
cb->rx_completion(ar, skb, pipe_info->pipe_num);
}
......@@ -936,6 +950,12 @@ static int ath10k_pci_hif_tx_sg(struct ath10k *ar, u8 pipe_id,
return err;
}
static int ath10k_pci_hif_diag_read(struct ath10k *ar, u32 address, void *buf,
size_t buf_len)
{
return ath10k_pci_diag_read_mem(ar, address, buf, buf_len);
}
static u16 ath10k_pci_hif_get_free_queue_number(struct ath10k *ar, u8 pipe)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
......@@ -986,6 +1006,8 @@ static void ath10k_pci_fw_crashed_dump(struct ath10k *ar)
spin_lock_bh(&ar->data_lock);
ar->stats.fw_crash_counter++;
crash_data = ath10k_debug_get_new_fw_crash_data(ar);
if (crash_data)
......@@ -1121,14 +1143,37 @@ static void ath10k_pci_hif_get_default_pipe(struct ath10k *ar,
&dl_is_polled);
}
static void ath10k_pci_irq_disable(struct ath10k *ar)
static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i;
u32 val;
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
val &= ~CORE_CTRL_PCIE_REG_31_MASK;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}
static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
{
u32 val;
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS);
val |= CORE_CTRL_PCIE_REG_31_MASK;
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS + CORE_CTRL_ADDRESS, val);
}
static void ath10k_pci_irq_disable(struct ath10k *ar)
{
ath10k_ce_disable_interrupts(ar);
ath10k_pci_disable_and_clear_legacy_irq(ar);
/* FIXME: How to mask all MSI interrupts? */
ath10k_pci_irq_msi_fw_mask(ar);
}
static void ath10k_pci_irq_sync(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
int i;
for (i = 0; i < max(1, ar_pci->num_msi_intrs); i++)
synchronize_irq(ar_pci->pdev->irq + i);
......@@ -1138,7 +1183,7 @@ static void ath10k_pci_irq_enable(struct ath10k *ar)
{
ath10k_ce_enable_interrupts(ar);
ath10k_pci_enable_legacy_irq(ar);
/* FIXME: How to unmask all MSI interrupts? */
ath10k_pci_irq_msi_fw_unmask(ar);
}
static int ath10k_pci_hif_start(struct ath10k *ar)
......@@ -1266,6 +1311,7 @@ static void ath10k_pci_hif_stop(struct ath10k *ar)
ath10k_pci_warm_reset(ar);
ath10k_pci_irq_disable(ar);
ath10k_pci_irq_sync(ar);
ath10k_pci_flush(ar);
}
......@@ -1569,23 +1615,40 @@ static int ath10k_pci_init_config(struct ath10k *ar)
return 0;
}
static int ath10k_pci_alloc_ce(struct ath10k *ar)
static int ath10k_pci_alloc_pipes(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe;
int i, ret;
for (i = 0; i < CE_COUNT; i++) {
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i]);
pipe = &ar_pci->pipe_info[i];
pipe->ce_hdl = &ar_pci->ce_states[i];
pipe->pipe_num = i;
pipe->hif_ce_state = ar;
ret = ath10k_ce_alloc_pipe(ar, i, &host_ce_config_wlan[i],
ath10k_pci_ce_send_done,
ath10k_pci_ce_recv_data);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipe %d: %d\n",
i, ret);
return ret;
}
/* Last CE is Diagnostic Window */
if (i == CE_COUNT - 1) {
ar_pci->ce_diag = pipe->ce_hdl;
continue;
}
pipe->buf_sz = (size_t)(host_ce_config_wlan[i].src_sz_max);
}
return 0;
}
static void ath10k_pci_free_ce(struct ath10k *ar)
static void ath10k_pci_free_pipes(struct ath10k *ar)
{
int i;
......@@ -1593,39 +1656,17 @@ static void ath10k_pci_free_ce(struct ath10k *ar)
ath10k_ce_free_pipe(ar, i);
}
static int ath10k_pci_ce_init(struct ath10k *ar)
static int ath10k_pci_init_pipes(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_pci_pipe *pipe_info;
const struct ce_attr *attr;
int pipe_num, ret;
int i, ret;
for (pipe_num = 0; pipe_num < CE_COUNT; pipe_num++) {
pipe_info = &ar_pci->pipe_info[pipe_num];
pipe_info->ce_hdl = &ar_pci->ce_states[pipe_num];
pipe_info->pipe_num = pipe_num;
pipe_info->hif_ce_state = ar;
attr = &host_ce_config_wlan[pipe_num];
ret = ath10k_ce_init_pipe(ar, pipe_num, attr,
ath10k_pci_ce_send_done,
ath10k_pci_ce_recv_data);
for (i = 0; i < CE_COUNT; i++) {
ret = ath10k_ce_init_pipe(ar, i, &host_ce_config_wlan[i]);
if (ret) {
ath10k_err(ar, "failed to initialize copy engine pipe %d: %d\n",
pipe_num, ret);
i, ret);
return ret;
}
if (pipe_num == CE_COUNT - 1) {
/*
* Reserve the ultimate CE for
* diagnostic Window support
*/
ar_pci->ce_diag = pipe_info->ce_hdl;
continue;
}
pipe_info->buf_sz = (size_t)(attr->src_sz_max);
}
return 0;
......@@ -1672,6 +1713,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot warm reset\n");
spin_lock_bh(&ar->data_lock);
ar->stats.fw_warm_reset_counter++;
spin_unlock_bh(&ar->data_lock);
/* debug */
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_CAUSE_ADDRESS);
......@@ -1773,7 +1820,7 @@ static int __ath10k_pci_hif_power_up(struct ath10k *ar, bool cold_reset)
goto err;
}
ret = ath10k_pci_ce_init(ar);
ret = ath10k_pci_init_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to initialize CE: %d\n", ret);
goto err;
......@@ -1921,6 +1968,7 @@ static int ath10k_pci_hif_resume(struct ath10k *ar)
static const struct ath10k_hif_ops ath10k_pci_hif_ops = {
.tx_sg = ath10k_pci_hif_tx_sg,
.diag_read = ath10k_pci_hif_diag_read,
.exchange_bmi_msg = ath10k_pci_hif_exchange_bmi_msg,
.start = ath10k_pci_hif_start,
.stop = ath10k_pci_hif_stop,
......@@ -2250,14 +2298,14 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (ar_pci->num_msi_intrs == 0)
/* Fix potential race by repeating CORE_BASE writes */
ath10k_pci_write32(ar, SOC_CORE_BASE_ADDRESS +
PCIE_INTR_ENABLE_ADDRESS,
PCIE_INTR_FIRMWARE_MASK |
PCIE_INTR_CE_MASK_ALL);
ath10k_pci_enable_legacy_irq(ar);
mdelay(10);
} while (time_before(jiffies, timeout));
ath10k_pci_disable_and_clear_legacy_irq(ar);
ath10k_pci_irq_msi_fw_mask(ar);
if (val == 0xffffffff) {
ath10k_err(ar, "failed to read device register, device is gone\n");
return -EIO;
......@@ -2287,6 +2335,12 @@ static int ath10k_pci_cold_reset(struct ath10k *ar)
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot cold reset\n");
spin_lock_bh(&ar->data_lock);
ar->stats.fw_cold_reset_counter++;
spin_unlock_bh(&ar->data_lock);
/* Put Target, including PCIe, into RESET. */
val = ath10k_pci_reg_read32(ar, SOC_GLOBAL_RESET_ADDRESS);
val |= 1;
......@@ -2400,6 +2454,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
u32 chip_id;
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev,
ATH10K_BUS_PCI,
&ath10k_pci_hif_ops);
if (!ar) {
dev_err(&pdev->dev, "failed to allocate core\n");
......@@ -2435,7 +2490,7 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_sleep;
}
ret = ath10k_pci_alloc_ce(ar);
ret = ath10k_pci_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
ret);
......@@ -2443,25 +2498,12 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
}
ath10k_pci_ce_deinit(ar);
ret = ath10k_ce_disable_interrupts(ar);
if (ret) {
ath10k_err(ar, "failed to disable copy engine interrupts: %d\n",
ret);
goto err_free_ce;
}
/* Workaround: There's no known way to mask all possible interrupts via
* device CSR. The only way to make sure the device doesn't assert
* interrupts is to reset it. Interrupts are then disabled on the host
* after handlers are registered.
*/
ath10k_pci_warm_reset(ar);
ath10k_pci_irq_disable(ar);
ret = ath10k_pci_init_irq(ar);
if (ret) {
ath10k_err(ar, "failed to init irqs: %d\n", ret);
goto err_free_ce;
goto err_free_pipes;
}
ath10k_info(ar, "pci irq %s interrupts %d irq_mode %d reset_mode %d\n",
......@@ -2474,9 +2516,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq;
}
/* This shouldn't race as the device has been reset above. */
ath10k_pci_irq_disable(ar);
ret = ath10k_core_register(ar, chip_id);
if (ret) {
ath10k_err(ar, "failed to register driver core: %d\n", ret);
......@@ -2492,8 +2531,8 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
err_deinit_irq:
ath10k_pci_deinit_irq(ar);
err_free_ce:
ath10k_pci_free_ce(ar);
err_free_pipes:
ath10k_pci_free_pipes(ar);
err_sleep:
ath10k_pci_sleep(ar);
......@@ -2527,7 +2566,7 @@ static void ath10k_pci_remove(struct pci_dev *pdev)
ath10k_pci_kill_tasklet(ar);
ath10k_pci_deinit_irq(ar);
ath10k_pci_ce_deinit(ar);
ath10k_pci_free_ce(ar);
ath10k_pci_free_pipes(ar);
ath10k_pci_sleep(ar);
ath10k_pci_release(ar);
ath10k_core_destroy(ar);
......@@ -2565,5 +2604,7 @@ module_exit(ath10k_pci_exit);
MODULE_AUTHOR("Qualcomm Atheros");
MODULE_DESCRIPTION("Driver support for Atheros QCA988X PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_FW_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API2_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" ATH10K_FW_API3_FILE);
MODULE_FIRMWARE(QCA988X_HW_2_0_FW_DIR "/" QCA988X_HW_2_0_BOARD_DATA_FILE);
......@@ -56,14 +56,14 @@ static uint8_t get_max_exp(s8 max_index, u16 max_magnitude, size_t bin_len,
}
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
const struct wmi_phyerr *phyerr,
const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
struct fft_sample_ath10k *fft_sample;
u8 buf[sizeof(*fft_sample) + SPECTRAL_ATH10K_MAX_NUM_BINS];
u16 freq1, freq2, total_gain_db, base_pwr_db, length, peak_mag;
u32 reg0, reg1, nf_list1, nf_list2;
u32 reg0, reg1;
u8 chain_idx, *bins;
int dc_pos;
......@@ -82,7 +82,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
/* TODO: there might be a reason why the hardware reports 20/40/80 MHz,
* but the results/plots suggest that it's actually 22/44/88 MHz.
*/
switch (event->hdr.chan_width_mhz) {
switch (phyerr->chan_width_mhz) {
case 20:
fft_sample->chan_width_mhz = 22;
break;
......@@ -101,7 +101,7 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
fft_sample->chan_width_mhz = 88;
break;
default:
fft_sample->chan_width_mhz = event->hdr.chan_width_mhz;
fft_sample->chan_width_mhz = phyerr->chan_width_mhz;
}
fft_sample->relpwr_db = MS(reg1, SEARCH_FFT_REPORT_REG1_RELPWR_DB);
......@@ -110,36 +110,22 @@ int ath10k_spectral_process_fft(struct ath10k *ar,
peak_mag = MS(reg1, SEARCH_FFT_REPORT_REG1_PEAK_MAG);
fft_sample->max_magnitude = __cpu_to_be16(peak_mag);
fft_sample->max_index = MS(reg0, SEARCH_FFT_REPORT_REG0_PEAK_SIDX);
fft_sample->rssi = event->hdr.rssi_combined;
fft_sample->rssi = phyerr->rssi_combined;
total_gain_db = MS(reg0, SEARCH_FFT_REPORT_REG0_TOTAL_GAIN_DB);
base_pwr_db = MS(reg0, SEARCH_FFT_REPORT_REG0_BASE_PWR_DB);
fft_sample->total_gain_db = __cpu_to_be16(total_gain_db);
fft_sample->base_pwr_db = __cpu_to_be16(base_pwr_db);
freq1 = __le16_to_cpu(event->hdr.freq1);
freq2 = __le16_to_cpu(event->hdr.freq2);
freq1 = __le16_to_cpu(phyerr->freq1);
freq2 = __le16_to_cpu(phyerr->freq2);
fft_sample->freq1 = __cpu_to_be16(freq1);
fft_sample->freq2 = __cpu_to_be16(freq2);
nf_list1 = __le32_to_cpu(event->hdr.nf_list_1);
nf_list2 = __le32_to_cpu(event->hdr.nf_list_2);
chain_idx = MS(reg0, SEARCH_FFT_REPORT_REG0_FFT_CHN_IDX);
switch (chain_idx) {
case 0:
fft_sample->noise = __cpu_to_be16(nf_list1 & 0xffffu);
break;
case 1:
fft_sample->noise = __cpu_to_be16((nf_list1 >> 16) & 0xffffu);
break;
case 2:
fft_sample->noise = __cpu_to_be16(nf_list2 & 0xffffu);
break;
case 3:
fft_sample->noise = __cpu_to_be16((nf_list2 >> 16) & 0xffffu);
break;
}
fft_sample->noise = __cpu_to_be16(
__le16_to_cpu(phyerr->nf_chains[chain_idx]));
bins = (u8 *)fftr;
bins += sizeof(*fftr);
......
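The spectral hunk above drops the switch over chain_idx, which unpacked two 32-bit nf_list words, in favour of a direct lookup into the per-chain __le16 array of the new wmi_phyerr layout. A small sketch showing that the two encodings address the same 16-bit values (the variable names and the sample values are illustrative only):

/* --- illustrative sketch, not part of the commit --- */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        /* Old view: four 16-bit noise-floor values packed into two
         * 32-bit words (low half = even chain, high half = odd chain).
         */
        uint32_t nf_list1 = 0xBBBBAAAAu;        /* chain1 | chain0 */
        uint32_t nf_list2 = 0xDDDDCCCCu;        /* chain3 | chain2 */

        /* Equivalent per-chain view, as the new nf_chains[] array. */
        uint16_t nf_chains[4] = {
                nf_list1 & 0xffffu, (nf_list1 >> 16) & 0xffffu,
                nf_list2 & 0xffffu, (nf_list2 >> 16) & 0xffffu,
        };
        int chain_idx;

        for (chain_idx = 0; chain_idx < 4; chain_idx++)
                printf("chain %d noise floor: 0x%04x\n",
                       chain_idx, nf_chains[chain_idx]);
        return 0;
}
/* --- end sketch --- */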
......@@ -47,8 +47,8 @@ enum ath10k_spectral_mode {
#ifdef CONFIG_ATH10K_DEBUGFS
int ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
const struct wmi_phyerr *phyerr,
const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf);
int ath10k_spectral_start(struct ath10k *ar);
int ath10k_spectral_vif_stop(struct ath10k_vif *arvif);
......@@ -59,8 +59,8 @@ void ath10k_spectral_destroy(struct ath10k *ar);
static inline int
ath10k_spectral_process_fft(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
const struct wmi_phyerr *phyerr,
const struct phyerr_fft_report *fftr,
size_t bin_len, u64 tsf)
{
return 0;
......
......@@ -254,6 +254,169 @@ TRACE_EVENT(ath10k_wmi_dbglog,
)
);
TRACE_EVENT(ath10k_htt_pktlog,
TP_PROTO(struct ath10k *ar, void *buf, u16 buf_len),
TP_ARGS(ar, buf, buf_len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u16, buf_len)
__dynamic_array(u8, pktlog, buf_len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->buf_len = buf_len;
memcpy(__get_dynamic_array(pktlog), buf, buf_len);
),
TP_printk(
"%s %s size %hu",
__get_str(driver),
__get_str(device),
__entry->buf_len
)
);
TRACE_EVENT(ath10k_htt_rx_desc,
TP_PROTO(struct ath10k *ar, u32 tsf, void *rxdesc, u16 len),
TP_ARGS(ar, tsf, rxdesc, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u32, tsf)
__field(u16, len)
__dynamic_array(u8, rxdesc, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->tsf = tsf;
__entry->len = len;
memcpy(__get_dynamic_array(rxdesc), rxdesc, len);
),
TP_printk(
"%s %s %u len %hu",
__get_str(driver),
__get_str(device),
__entry->tsf,
__entry->len
)
);
TRACE_EVENT(ath10k_htt_tx,
TP_PROTO(struct ath10k *ar, u16 msdu_id, u16 msdu_len,
u8 vdev_id, u8 tid),
TP_ARGS(ar, msdu_id, msdu_len, vdev_id, tid),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u16, msdu_id)
__field(u16, msdu_len)
__field(u8, vdev_id)
__field(u8, tid)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->msdu_id = msdu_id;
__entry->msdu_len = msdu_len;
__entry->vdev_id = vdev_id;
__entry->tid = tid;
),
TP_printk(
"%s %s msdu_id %d msdu_len %d vdev_id %d tid %d",
__get_str(driver),
__get_str(device),
__entry->msdu_id,
__entry->msdu_len,
__entry->vdev_id,
__entry->tid
)
);
TRACE_EVENT(ath10k_txrx_tx_unref,
TP_PROTO(struct ath10k *ar, u16 msdu_id),
TP_ARGS(ar, msdu_id),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u16, msdu_id)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->msdu_id = msdu_id;
),
TP_printk(
"%s %s msdu_id %d",
__get_str(driver),
__get_str(device),
__entry->msdu_id
)
);
DECLARE_EVENT_CLASS(ath10k_data_event,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(size_t, len)
__dynamic_array(u8, data, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
TP_printk(
"%s %s len %zu\n",
__get_str(driver),
__get_str(device),
__entry->len
)
);
DEFINE_EVENT(ath10k_data_event, ath10k_htt_tx_msdu,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len)
);
DEFINE_EVENT(ath10k_data_event, ath10k_htt_rx_pop_msdu,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len)
);
DEFINE_EVENT(ath10k_data_event, ath10k_wmi_mgmt_tx,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len)
);
DEFINE_EVENT(ath10k_data_event, ath10k_wmi_bcn_tx,
TP_PROTO(struct ath10k *ar, void *data, size_t len),
TP_ARGS(ar, data, len)
);
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */
......
......@@ -78,6 +78,7 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
info = IEEE80211_SKB_CB(msdu);
memset(&info->status, 0, sizeof(info->status));
trace_ath10k_txrx_tx_unref(ar, tx_done->msdu_id);
if (tx_done->discard) {
ieee80211_free_txskb(htt->ar->hw, msdu);
......
......@@ -609,6 +609,40 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
};
static void
ath10k_wmi_put_wmi_channel(struct wmi_channel *ch,
const struct wmi_channel_arg *arg)
{
u32 flags = 0;
memset(ch, 0, sizeof(*ch));
if (arg->passive)
flags |= WMI_CHAN_FLAG_PASSIVE;
if (arg->allow_ibss)
flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
if (arg->allow_ht)
flags |= WMI_CHAN_FLAG_ALLOW_HT;
if (arg->allow_vht)
flags |= WMI_CHAN_FLAG_ALLOW_VHT;
if (arg->ht40plus)
flags |= WMI_CHAN_FLAG_HT40_PLUS;
if (arg->chan_radar)
flags |= WMI_CHAN_FLAG_DFS;
ch->mhz = __cpu_to_le32(arg->freq);
ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
ch->band_center_freq2 = 0;
ch->min_power = arg->min_power;
ch->max_power = arg->max_power;
ch->reg_power = arg->max_reg_power;
ch->antenna_max = arg->max_antenna_gain;
/* mode & flags share storage */
ch->mode = arg->mode;
ch->flags |= __cpu_to_le32(flags);
}
int ath10k_wmi_wait_for_service_ready(struct ath10k *ar)
{
int ret;
......@@ -800,6 +834,7 @@ int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi mgmt tx skb %p len %d ftype %02x stype %02x\n",
wmi_skb, wmi_skb->len, fc & IEEE80211_FCTL_FTYPE,
fc & IEEE80211_FCTL_STYPE);
trace_ath10k_wmi_mgmt_tx(ar, skb->data, skb->len);
/* Send the management frame buffer to the target */
ret = ath10k_wmi_cmd_send(ar, wmi_skb, ar->wmi.cmd->mgmt_tx_cmdid);
......@@ -1079,7 +1114,6 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
struct wmi_mgmt_rx_event_v2 *ev_v2;
struct wmi_mgmt_rx_hdr_v1 *ev_hdr;
struct ieee80211_rx_status *status = IEEE80211_SKB_RXCB(skb);
struct ieee80211_channel *ch;
struct ieee80211_hdr *hdr;
u32 rx_status;
u32 channel;
......@@ -1132,25 +1166,26 @@ static int ath10k_wmi_event_mgmt_rx(struct ath10k *ar, struct sk_buff *skb)
if (rx_status & WMI_RX_STATUS_ERR_MIC)
status->flag |= RX_FLAG_MMIC_ERROR;
/* HW can Rx CCK rates on 5GHz. In that case phy_mode is set to
/* Hardware can Rx CCK rates on 5GHz. In that case phy_mode is set to
* MODE_11B. This means phy_mode is not a reliable source for the band
* of mgmt rx. */
ch = ar->scan_channel;
if (!ch)
ch = ar->rx_channel;
if (ch) {
status->band = ch->band;
if (phy_mode == MODE_11B &&
status->band == IEEE80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
* of mgmt rx.
*/
if (channel >= 1 && channel <= 14) {
status->band = IEEE80211_BAND_2GHZ;
} else if (channel >= 36 && channel <= 165) {
status->band = IEEE80211_BAND_5GHZ;
} else {
ath10k_warn(ar, "using (unreliable) phy_mode to extract band for mgmt rx\n");
status->band = phy_mode_to_band(phy_mode);
/* Shouldn't happen unless the list of channels advertised to
* mac80211 has been changed.
*/
WARN_ON_ONCE(1);
dev_kfree_skb(skb);
return 0;
}
if (phy_mode == MODE_11B && status->band == IEEE80211_BAND_5GHZ)
ath10k_dbg(ar, ATH10K_DBG_MGMT, "wmi mgmt rx 11b (CCK) on 5GHz\n");
status->freq = ieee80211_channel_to_frequency(channel, status->band);
status->signal = snr + ATH10K_DEFAULT_NOISE_FLOOR;
status->rate_idx = get_rate_idx(rate, status->band);
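Rather than guessing the band from scan_channel/rx_channel or the unreliable phy_mode, the mgmt rx path above now derives it from the channel number itself: 1-14 maps to 2.4 GHz, 36-165 to 5 GHz, and anything else is dropped with a warning. A standalone sketch of that mapping (the enum and function names are illustrative, not the mac80211 ones):

/* --- illustrative sketch, not part of the commit --- */
#include <stdio.h>

enum band { BAND_2GHZ, BAND_5GHZ, BAND_INVALID };

static enum band channel_to_band(unsigned int channel)
{
        if (channel >= 1 && channel <= 14)
                return BAND_2GHZ;
        if (channel >= 36 && channel <= 165)
                return BAND_5GHZ;
        return BAND_INVALID;    /* frame would be dropped with a warning */
}

int main(void)
{
        unsigned int chans[] = { 1, 6, 14, 36, 149, 165, 183 };
        unsigned int i;

        for (i = 0; i < sizeof(chans) / sizeof(chans[0]); i++)
                printf("channel %3u -> band %d\n",
                       chans[i], channel_to_band(chans[i]));
        return 0;
}
/* --- end sketch --- */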
......@@ -1295,14 +1330,196 @@ static int ath10k_wmi_event_debug_mesg(struct ath10k *ar, struct sk_buff *skb)
return 0;
}
static void ath10k_wmi_pull_pdev_stats(const struct wmi_pdev_stats *src,
struct ath10k_fw_stats_pdev *dst)
{
const struct wal_dbg_tx_stats *tx = &src->wal.tx;
const struct wal_dbg_rx_stats *rx = &src->wal.rx;
dst->ch_noise_floor = __le32_to_cpu(src->chan_nf);
dst->tx_frame_count = __le32_to_cpu(src->tx_frame_count);
dst->rx_frame_count = __le32_to_cpu(src->rx_frame_count);
dst->rx_clear_count = __le32_to_cpu(src->rx_clear_count);
dst->cycle_count = __le32_to_cpu(src->cycle_count);
dst->phy_err_count = __le32_to_cpu(src->phy_err_count);
dst->chan_tx_power = __le32_to_cpu(src->chan_tx_pwr);
dst->comp_queued = __le32_to_cpu(tx->comp_queued);
dst->comp_delivered = __le32_to_cpu(tx->comp_delivered);
dst->msdu_enqued = __le32_to_cpu(tx->msdu_enqued);
dst->mpdu_enqued = __le32_to_cpu(tx->mpdu_enqued);
dst->wmm_drop = __le32_to_cpu(tx->wmm_drop);
dst->local_enqued = __le32_to_cpu(tx->local_enqued);
dst->local_freed = __le32_to_cpu(tx->local_freed);
dst->hw_queued = __le32_to_cpu(tx->hw_queued);
dst->hw_reaped = __le32_to_cpu(tx->hw_reaped);
dst->underrun = __le32_to_cpu(tx->underrun);
dst->tx_abort = __le32_to_cpu(tx->tx_abort);
dst->mpdus_requed = __le32_to_cpu(tx->mpdus_requed);
dst->tx_ko = __le32_to_cpu(tx->tx_ko);
dst->data_rc = __le32_to_cpu(tx->data_rc);
dst->self_triggers = __le32_to_cpu(tx->self_triggers);
dst->sw_retry_failure = __le32_to_cpu(tx->sw_retry_failure);
dst->illgl_rate_phy_err = __le32_to_cpu(tx->illgl_rate_phy_err);
dst->pdev_cont_xretry = __le32_to_cpu(tx->pdev_cont_xretry);
dst->pdev_tx_timeout = __le32_to_cpu(tx->pdev_tx_timeout);
dst->pdev_resets = __le32_to_cpu(tx->pdev_resets);
dst->phy_underrun = __le32_to_cpu(tx->phy_underrun);
dst->txop_ovf = __le32_to_cpu(tx->txop_ovf);
dst->mid_ppdu_route_change = __le32_to_cpu(rx->mid_ppdu_route_change);
dst->status_rcvd = __le32_to_cpu(rx->status_rcvd);
dst->r0_frags = __le32_to_cpu(rx->r0_frags);
dst->r1_frags = __le32_to_cpu(rx->r1_frags);
dst->r2_frags = __le32_to_cpu(rx->r2_frags);
dst->r3_frags = __le32_to_cpu(rx->r3_frags);
dst->htt_msdus = __le32_to_cpu(rx->htt_msdus);
dst->htt_mpdus = __le32_to_cpu(rx->htt_mpdus);
dst->loc_msdus = __le32_to_cpu(rx->loc_msdus);
dst->loc_mpdus = __le32_to_cpu(rx->loc_mpdus);
dst->oversize_amsdu = __le32_to_cpu(rx->oversize_amsdu);
dst->phy_errs = __le32_to_cpu(rx->phy_errs);
dst->phy_err_drop = __le32_to_cpu(rx->phy_err_drop);
dst->mpdu_errs = __le32_to_cpu(rx->mpdu_errs);
}
static void ath10k_wmi_pull_peer_stats(const struct wmi_peer_stats *src,
struct ath10k_fw_stats_peer *dst)
{
ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
}
static int ath10k_wmi_main_pull_fw_stats(struct ath10k *ar,
struct sk_buff *skb,
struct ath10k_fw_stats *stats)
{
const struct wmi_stats_event *ev = (void *)skb->data;
u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
int i;
if (!skb_pull(skb, sizeof(*ev)))
return -EPROTO;
num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_pdev_stats *src;
struct ath10k_fw_stats_pdev *dst;
src = (void *)skb->data;
if (!skb_pull(skb, sizeof(*src)))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ath10k_wmi_pull_pdev_stats(src, dst);
list_add_tail(&dst->list, &stats->pdevs);
}
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
const struct wmi_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
src = (void *)skb->data;
if (!skb_pull(skb, sizeof(*src)))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ath10k_wmi_pull_peer_stats(src, dst);
list_add_tail(&dst->list, &stats->peers);
}
return 0;
}
static int ath10k_wmi_10x_pull_fw_stats(struct ath10k *ar,
struct sk_buff *skb,
struct ath10k_fw_stats *stats)
{
const struct wmi_stats_event *ev = (void *)skb->data;
u32 num_pdev_stats, num_vdev_stats, num_peer_stats;
int i;
if (!skb_pull(skb, sizeof(*ev)))
return -EPROTO;
num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_10x_pdev_stats *src;
struct ath10k_fw_stats_pdev *dst;
src = (void *)skb->data;
if (!skb_pull(skb, sizeof(*src)))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ath10k_wmi_pull_pdev_stats(&src->old, dst);
dst->ack_rx_bad = __le32_to_cpu(src->ack_rx_bad);
dst->rts_bad = __le32_to_cpu(src->rts_bad);
dst->rts_good = __le32_to_cpu(src->rts_good);
dst->fcs_bad = __le32_to_cpu(src->fcs_bad);
dst->no_beacons = __le32_to_cpu(src->no_beacons);
dst->mib_int_count = __le32_to_cpu(src->mib_int_count);
list_add_tail(&dst->list, &stats->pdevs);
}
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
const struct wmi_10x_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
src = (void *)skb->data;
if (!skb_pull(skb, sizeof(*src)))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ath10k_wmi_pull_peer_stats(&src->old, dst);
dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
list_add_tail(&dst->list, &stats->peers);
}
return 0;
}
int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats)
{
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
return ath10k_wmi_10x_pull_fw_stats(ar, skb, stats);
else
return ath10k_wmi_main_pull_fw_stats(ar, skb, stats);
}
static void ath10k_wmi_event_update_stats(struct ath10k *ar,
struct sk_buff *skb)
{
struct wmi_stats_event *ev = (struct wmi_stats_event *)skb->data;
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
ath10k_debug_read_target_stats(ar, ev);
ath10k_debug_fw_stats_process(ar, skb);
}
static void ath10k_wmi_event_vdev_start_resp(struct ath10k *ar,
......@@ -1579,6 +1796,7 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
struct wmi_bcn_info *bcn_info;
struct ath10k_vif *arvif;
struct sk_buff *bcn;
dma_addr_t paddr;
int ret, vdev_id = 0;
ev = (struct wmi_host_swba_event *)skb->data;
......@@ -1647,27 +1865,35 @@ static void ath10k_wmi_event_host_swba(struct ath10k *ar, struct sk_buff *skb)
ath10k_warn(ar, "SWBA overrun on vdev %d\n",
arvif->vdev_id);
dma_unmap_single(arvif->ar->dev,
ATH10K_SKB_CB(arvif->beacon)->paddr,
arvif->beacon->len, DMA_TO_DEVICE);
dev_kfree_skb_any(arvif->beacon);
arvif->beacon = NULL;
ath10k_mac_vif_beacon_free(arvif);
}
ATH10K_SKB_CB(bcn)->paddr = dma_map_single(arvif->ar->dev,
bcn->data, bcn->len,
DMA_TO_DEVICE);
ret = dma_mapping_error(arvif->ar->dev,
ATH10K_SKB_CB(bcn)->paddr);
if (ret) {
ath10k_warn(ar, "failed to map beacon: %d\n", ret);
dev_kfree_skb_any(bcn);
goto skip;
if (!arvif->beacon_buf) {
paddr = dma_map_single(arvif->ar->dev, bcn->data,
bcn->len, DMA_TO_DEVICE);
ret = dma_mapping_error(arvif->ar->dev, paddr);
if (ret) {
ath10k_warn(ar, "failed to map beacon: %d\n",
ret);
dev_kfree_skb_any(bcn);
goto skip;
}
ATH10K_SKB_CB(bcn)->paddr = paddr;
} else {
if (bcn->len > IEEE80211_MAX_FRAME_LEN) {
ath10k_warn(ar, "trimming beacon %d -> %d bytes!\n",
bcn->len, IEEE80211_MAX_FRAME_LEN);
skb_trim(bcn, IEEE80211_MAX_FRAME_LEN);
}
memcpy(arvif->beacon_buf, bcn->data, bcn->len);
ATH10K_SKB_CB(bcn)->paddr = arvif->beacon_paddr;
}
arvif->beacon = bcn;
arvif->beacon_sent = false;
trace_ath10k_wmi_bcn_tx(ar, bcn->data, bcn->len);
ath10k_wmi_tx_beacon_nowait(arvif);
skip:
spin_unlock_bh(&ar->data_lock);
......@@ -1681,8 +1907,8 @@ static void ath10k_wmi_event_tbttoffset_update(struct ath10k *ar,
}
static void ath10k_dfs_radar_report(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_radar_report *rr,
const struct wmi_phyerr *phyerr,
const struct phyerr_radar_report *rr,
u64 tsf)
{
u32 reg0, reg1, tsf32l;
......@@ -1715,12 +1941,12 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
return;
/* report event to DFS pattern detector */
tsf32l = __le32_to_cpu(event->hdr.tsf_timestamp);
tsf32l = __le32_to_cpu(phyerr->tsf_timestamp);
tsf64 = tsf & (~0xFFFFFFFFULL);
tsf64 |= tsf32l;
width = MS(reg1, RADAR_REPORT_REG1_PULSE_DUR);
rssi = event->hdr.rssi_combined;
rssi = phyerr->rssi_combined;
/* hardware stores this as an 8 bit signed value,
* set to zero if it is a negative number
......@@ -1759,8 +1985,8 @@ static void ath10k_dfs_radar_report(struct ath10k *ar,
}
static int ath10k_dfs_fft_report(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
struct phyerr_fft_report *fftr,
const struct wmi_phyerr *phyerr,
const struct phyerr_fft_report *fftr,
u64 tsf)
{
u32 reg0, reg1;
......@@ -1768,7 +1994,7 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
reg0 = __le32_to_cpu(fftr->reg0);
reg1 = __le32_to_cpu(fftr->reg1);
rssi = event->hdr.rssi_combined;
rssi = phyerr->rssi_combined;
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi phyerr fft report total_gain_db %d base_pwr_db %d fft_chn_idx %d peak_sidx %d\n",
......@@ -1797,20 +2023,20 @@ static int ath10k_dfs_fft_report(struct ath10k *ar,
}
static void ath10k_wmi_event_dfs(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
const struct wmi_phyerr *phyerr,
u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
struct phyerr_tlv *tlv;
struct phyerr_radar_report *rr;
struct phyerr_fft_report *fftr;
u8 *tlv_buf;
const struct phyerr_tlv *tlv;
const struct phyerr_radar_report *rr;
const struct phyerr_fft_report *fftr;
const u8 *tlv_buf;
buf_len = __le32_to_cpu(event->hdr.buf_len);
buf_len = __le32_to_cpu(phyerr->buf_len);
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs err_code %d rssi %d tsfl 0x%X tsf64 0x%llX len %d\n",
event->hdr.phy_err_code, event->hdr.rssi_combined,
__le32_to_cpu(event->hdr.tsf_timestamp), tsf, buf_len);
phyerr->phy_err_code, phyerr->rssi_combined,
__le32_to_cpu(phyerr->tsf_timestamp), tsf, buf_len);
/* Skip event if DFS disabled */
if (!config_enabled(CONFIG_ATH10K_DFS_CERTIFIED))
......@@ -1825,9 +2051,9 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
return;
}
tlv = (struct phyerr_tlv *)&event->bufp[i];
tlv = (struct phyerr_tlv *)&phyerr->buf[i];
tlv_len = __le16_to_cpu(tlv->len);
tlv_buf = &event->bufp[i + sizeof(*tlv)];
tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
ath10k_dbg(ar, ATH10K_DBG_REGULATORY,
"wmi event dfs tlv_len %d tlv_tag 0x%02X tlv_sig 0x%02X\n",
tlv_len, tlv->tag, tlv->sig);
......@@ -1841,7 +2067,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
}
rr = (struct phyerr_radar_report *)tlv_buf;
ath10k_dfs_radar_report(ar, event, rr, tsf);
ath10k_dfs_radar_report(ar, phyerr, rr, tsf);
break;
case PHYERR_TLV_TAG_SEARCH_FFT_REPORT:
if (i + sizeof(*tlv) + sizeof(*fftr) > buf_len) {
......@@ -1851,7 +2077,7 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
}
fftr = (struct phyerr_fft_report *)tlv_buf;
res = ath10k_dfs_fft_report(ar, event, fftr, tsf);
res = ath10k_dfs_fft_report(ar, phyerr, fftr, tsf);
if (res)
return;
break;
......@@ -1863,16 +2089,16 @@ static void ath10k_wmi_event_dfs(struct ath10k *ar,
static void
ath10k_wmi_event_spectral_scan(struct ath10k *ar,
struct wmi_single_phyerr_rx_event *event,
const struct wmi_phyerr *phyerr,
u64 tsf)
{
int buf_len, tlv_len, res, i = 0;
struct phyerr_tlv *tlv;
u8 *tlv_buf;
struct phyerr_fft_report *fftr;
const void *tlv_buf;
const struct phyerr_fft_report *fftr;
size_t fftr_len;
buf_len = __le32_to_cpu(event->hdr.buf_len);
buf_len = __le32_to_cpu(phyerr->buf_len);
while (i < buf_len) {
if (i + sizeof(*tlv) > buf_len) {
......@@ -1881,9 +2107,9 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
return;
}
tlv = (struct phyerr_tlv *)&event->bufp[i];
tlv = (struct phyerr_tlv *)&phyerr->buf[i];
tlv_len = __le16_to_cpu(tlv->len);
tlv_buf = &event->bufp[i + sizeof(*tlv)];
tlv_buf = &phyerr->buf[i + sizeof(*tlv)];
if (i + sizeof(*tlv) + tlv_len > buf_len) {
ath10k_warn(ar, "failed to parse phyerr tlv payload at byte %d\n",
......@@ -1900,8 +2126,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
}
fftr_len = tlv_len - sizeof(*fftr);
fftr = (struct phyerr_fft_report *)tlv_buf;
res = ath10k_spectral_process_fft(ar, event,
fftr = tlv_buf;
res = ath10k_spectral_process_fft(ar, phyerr,
fftr, fftr_len,
tsf);
if (res < 0) {
......@@ -1918,8 +2144,8 @@ ath10k_wmi_event_spectral_scan(struct ath10k *ar,
static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_comb_phyerr_rx_event *comb_event;
struct wmi_single_phyerr_rx_event *event;
const struct wmi_phyerr_event *ev;
const struct wmi_phyerr *phyerr;
u32 count, i, buf_len, phy_err_code;
u64 tsf;
int left_len = skb->len;
......@@ -1927,38 +2153,38 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
ATH10K_DFS_STAT_INC(ar, phy_errors);
/* Check if combined event available */
if (left_len < sizeof(*comb_event)) {
if (left_len < sizeof(*ev)) {
ath10k_warn(ar, "wmi phyerr combined event wrong len\n");
return;
}
left_len -= sizeof(*comb_event);
left_len -= sizeof(*ev);
/* Check number of included events */
comb_event = (struct wmi_comb_phyerr_rx_event *)skb->data;
count = __le32_to_cpu(comb_event->hdr.num_phyerr_events);
ev = (const struct wmi_phyerr_event *)skb->data;
count = __le32_to_cpu(ev->num_phyerrs);
tsf = __le32_to_cpu(comb_event->hdr.tsf_u32);
tsf = __le32_to_cpu(ev->tsf_u32);
tsf <<= 32;
tsf |= __le32_to_cpu(comb_event->hdr.tsf_l32);
tsf |= __le32_to_cpu(ev->tsf_l32);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event phyerr count %d tsf64 0x%llX\n",
count, tsf);
event = (struct wmi_single_phyerr_rx_event *)comb_event->bufp;
phyerr = ev->phyerrs;
for (i = 0; i < count; i++) {
/* Check if we can read event header */
if (left_len < sizeof(*event)) {
if (left_len < sizeof(*phyerr)) {
ath10k_warn(ar, "single event (%d) wrong head len\n",
i);
return;
}
left_len -= sizeof(*event);
left_len -= sizeof(*phyerr);
buf_len = __le32_to_cpu(event->hdr.buf_len);
phy_err_code = event->hdr.phy_err_code;
buf_len = __le32_to_cpu(phyerr->buf_len);
phy_err_code = phyerr->phy_err_code;
if (left_len < buf_len) {
ath10k_warn(ar, "single event (%d) wrong buf len\n", i);
......@@ -1969,20 +2195,20 @@ static void ath10k_wmi_event_phyerr(struct ath10k *ar, struct sk_buff *skb)
switch (phy_err_code) {
case PHY_ERROR_RADAR:
ath10k_wmi_event_dfs(ar, event, tsf);
ath10k_wmi_event_dfs(ar, phyerr, tsf);
break;
case PHY_ERROR_SPECTRAL_SCAN:
ath10k_wmi_event_spectral_scan(ar, event, tsf);
ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
break;
case PHY_ERROR_FALSE_RADAR_EXT:
ath10k_wmi_event_dfs(ar, event, tsf);
ath10k_wmi_event_spectral_scan(ar, event, tsf);
ath10k_wmi_event_dfs(ar, phyerr, tsf);
ath10k_wmi_event_spectral_scan(ar, phyerr, tsf);
break;
default:
break;
}
event += sizeof(*event) + buf_len;
phyerr = (void *)phyerr + sizeof(*phyerr) + buf_len;
}
}
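/*
 * Editorial sketch, not part of the commit: how the combined PHYERR
 * payload is laid out, using the wmi_phyerr_event/wmi_phyerr structures
 * introduced later in this diff.  Each record is a fixed header followed
 * by buf_len payload bytes, so the cursor advances by
 * sizeof(*phyerr) + buf_len, and the 64-bit TSF is rebuilt from the two
 * 32-bit halves in the outer event.  The helper name is hypothetical and
 * bounds checks are omitted for brevity.
 */
static void example_phyerr_layout(const struct wmi_phyerr_event *ev)
{
	const struct wmi_phyerr *phyerr = ev->phyerrs;
	u32 i, count = __le32_to_cpu(ev->num_phyerrs);
	u64 tsf = ((u64)__le32_to_cpu(ev->tsf_u32) << 32) |
		  __le32_to_cpu(ev->tsf_l32);

	for (i = 0; i < count; i++) {
		u32 buf_len = __le32_to_cpu(phyerr->buf_len);

		/* phyerr->buf[0..buf_len-1] holds the TLVs for this record */
		phyerr = (const void *)phyerr + sizeof(*phyerr) + buf_len;
	}
	(void)tsf;
}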
......@@ -2163,30 +2389,113 @@ static int ath10k_wmi_alloc_host_mem(struct ath10k *ar, u32 req_id,
return 0;
}
static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
struct sk_buff *skb)
static int ath10k_wmi_main_pull_svc_rdy_ev(struct sk_buff *skb,
struct wmi_svc_rdy_ev_arg *arg)
{
struct wmi_service_ready_event *ev = (void *)skb->data;
struct wmi_service_ready_event *ev;
size_t i, n;
if (skb->len < sizeof(*ev))
return -EPROTO;
ev = (void *)skb->data;
skb_pull(skb, sizeof(*ev));
arg->min_tx_power = ev->hw_min_tx_power;
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
arg->sw_ver0 = ev->sw_version;
arg->sw_ver1 = ev->sw_version_1;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = ev->wmi_service_bitmap;
n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
ARRAY_SIZE(arg->mem_reqs));
for (i = 0; i < n; i++)
arg->mem_reqs[i] = &ev->mem_reqs[i];
if (skb->len <
__le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
return -EPROTO;
return 0;
}
static int ath10k_wmi_10x_pull_svc_rdy_ev(struct sk_buff *skb,
struct wmi_svc_rdy_ev_arg *arg)
{
struct wmi_10x_service_ready_event *ev;
int i, n;
if (skb->len < sizeof(*ev))
return -EPROTO;
ev = (void *)skb->data;
skb_pull(skb, sizeof(*ev));
arg->min_tx_power = ev->hw_min_tx_power;
arg->max_tx_power = ev->hw_max_tx_power;
arg->ht_cap = ev->ht_cap_info;
arg->vht_cap = ev->vht_cap_info;
arg->sw_ver0 = ev->sw_version;
arg->phy_capab = ev->phy_capability;
arg->num_rf_chains = ev->num_rf_chains;
arg->eeprom_rd = ev->hal_reg_capabilities.eeprom_rd;
arg->num_mem_reqs = ev->num_mem_reqs;
arg->service_map = ev->wmi_service_bitmap;
n = min_t(size_t, __le32_to_cpu(arg->num_mem_reqs),
ARRAY_SIZE(arg->mem_reqs));
for (i = 0; i < n; i++)
arg->mem_reqs[i] = &ev->mem_reqs[i];
if (skb->len <
__le32_to_cpu(arg->num_mem_reqs) * sizeof(arg->mem_reqs[0]))
return -EPROTO;
return 0;
}
static void ath10k_wmi_event_service_ready(struct ath10k *ar,
struct sk_buff *skb)
{
struct wmi_svc_rdy_ev_arg arg = {};
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
int ret;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
ret = ath10k_wmi_10x_pull_svc_rdy_ev(skb, &arg);
wmi_10x_svc_map(arg.service_map, svc_bmap);
} else {
ret = ath10k_wmi_main_pull_svc_rdy_ev(skb, &arg);
wmi_main_svc_map(arg.service_map, svc_bmap);
}
if (skb->len < sizeof(*ev)) {
ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
skb->len, sizeof(*ev));
if (ret) {
ath10k_warn(ar, "failed to parse service ready: %d\n", ret);
return;
}
ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
ar->hw_min_tx_power = __le32_to_cpu(arg.min_tx_power);
ar->hw_max_tx_power = __le32_to_cpu(arg.max_tx_power);
ar->ht_cap_info = __le32_to_cpu(arg.ht_cap);
ar->vht_cap_info = __le32_to_cpu(arg.vht_cap);
ar->fw_version_major =
(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
(__le32_to_cpu(arg.sw_ver0) & 0xff000000) >> 24;
ar->fw_version_minor = (__le32_to_cpu(arg.sw_ver0) & 0x00ffffff);
ar->fw_version_release =
(__le32_to_cpu(ev->sw_version_1) & 0xffff0000) >> 16;
ar->fw_version_build = (__le32_to_cpu(ev->sw_version_1) & 0x0000ffff);
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
(__le32_to_cpu(arg.sw_ver1) & 0xffff0000) >> 16;
ar->fw_version_build = (__le32_to_cpu(arg.sw_ver1) & 0x0000ffff);
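	/* Worked example (editorial, not from the commit): for sw_ver0
	 * 0x0204001c and sw_ver1 0x00010000 the masks above yield major 0x02,
	 * minor 0x04001c, release 0x0001 and build 0x0000.
	 */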
ar->phy_capability = __le32_to_cpu(arg.phy_capab);
ar->num_rf_chains = __le32_to_cpu(arg.num_rf_chains);
ar->ath_common.regulatory.current_rd = __le32_to_cpu(arg.eeprom_rd);
ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
arg.service_map, sizeof(arg.service_map));
/* only manually set fw features when not using FW IE format */
if (ar->fw_api == 1 && ar->fw_version_build > 636)
......@@ -2198,13 +2507,8 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
}
ar->ath_common.regulatory.current_rd =
__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
wmi_main_svc_map(ev->wmi_service_bitmap, svc_bmap);
ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
if (strlen(ar->hw->wiphy->fw_version) == 0) {
snprintf(ar->hw->wiphy->fw_version,
......@@ -2216,93 +2520,18 @@ static void ath10k_wmi_service_ready_event_rx(struct ath10k *ar,
ar->fw_version_build);
}
/* FIXME: it would probably be better to support this */
if (__le32_to_cpu(ev->num_mem_reqs) > 0) {
ath10k_warn(ar, "target requested %d memory chunks; ignoring\n",
__le32_to_cpu(ev->num_mem_reqs));
}
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event service ready sw_ver 0x%08x sw_ver1 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->sw_version_1),
__le32_to_cpu(ev->abi_version),
__le32_to_cpu(ev->phy_capability),
__le32_to_cpu(ev->ht_cap_info),
__le32_to_cpu(ev->vht_cap_info),
__le32_to_cpu(ev->vht_supp_mcs),
__le32_to_cpu(ev->sys_cap_info),
__le32_to_cpu(ev->num_mem_reqs),
__le32_to_cpu(ev->num_rf_chains));
complete(&ar->wmi.service_ready);
}
static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
struct sk_buff *skb)
{
u32 num_units, req_id, unit_size, num_mem_reqs, num_unit_info, i;
int ret;
struct wmi_service_ready_event_10x *ev = (void *)skb->data;
DECLARE_BITMAP(svc_bmap, WMI_SERVICE_MAX) = {};
if (skb->len < sizeof(*ev)) {
ath10k_warn(ar, "Service ready event was %d B but expected %zu B. Wrong firmware version?\n",
skb->len, sizeof(*ev));
return;
}
ar->hw_min_tx_power = __le32_to_cpu(ev->hw_min_tx_power);
ar->hw_max_tx_power = __le32_to_cpu(ev->hw_max_tx_power);
ar->ht_cap_info = __le32_to_cpu(ev->ht_cap_info);
ar->vht_cap_info = __le32_to_cpu(ev->vht_cap_info);
ar->fw_version_major =
(__le32_to_cpu(ev->sw_version) & 0xff000000) >> 24;
ar->fw_version_minor = (__le32_to_cpu(ev->sw_version) & 0x00ffffff);
ar->phy_capability = __le32_to_cpu(ev->phy_capability);
ar->num_rf_chains = __le32_to_cpu(ev->num_rf_chains);
if (ar->num_rf_chains > WMI_MAX_SPATIAL_STREAM) {
ath10k_warn(ar, "hardware advertises support for more spatial streams than it should (%d > %d)\n",
ar->num_rf_chains, WMI_MAX_SPATIAL_STREAM);
ar->num_rf_chains = WMI_MAX_SPATIAL_STREAM;
}
ar->ath_common.regulatory.current_rd =
__le32_to_cpu(ev->hal_reg_capabilities.eeprom_rd);
wmi_10x_svc_map(ev->wmi_service_bitmap, svc_bmap);
ath10k_debug_read_service_map(ar, svc_bmap, sizeof(svc_bmap));
ath10k_dbg_dump(ar, ATH10K_DBG_WMI, NULL, "wmi svc: ",
ev->wmi_service_bitmap, sizeof(ev->wmi_service_bitmap));
if (strlen(ar->hw->wiphy->fw_version) == 0) {
snprintf(ar->hw->wiphy->fw_version,
sizeof(ar->hw->wiphy->fw_version),
"%u.%u",
ar->fw_version_major,
ar->fw_version_minor);
}
num_mem_reqs = __le32_to_cpu(ev->num_mem_reqs);
if (num_mem_reqs > ATH10K_MAX_MEM_REQS) {
num_mem_reqs = __le32_to_cpu(arg.num_mem_reqs);
if (num_mem_reqs > WMI_MAX_MEM_REQS) {
ath10k_warn(ar, "requested memory chunks number (%d) exceeds the limit\n",
num_mem_reqs);
return;
}
if (!num_mem_reqs)
goto exit;
ath10k_dbg(ar, ATH10K_DBG_WMI, "firmware has requested %d memory chunks\n",
num_mem_reqs);
for (i = 0; i < num_mem_reqs; ++i) {
req_id = __le32_to_cpu(ev->mem_reqs[i].req_id);
num_units = __le32_to_cpu(ev->mem_reqs[i].num_units);
unit_size = __le32_to_cpu(ev->mem_reqs[i].unit_size);
num_unit_info = __le32_to_cpu(ev->mem_reqs[i].num_unit_info);
req_id = __le32_to_cpu(arg.mem_reqs[i]->req_id);
num_units = __le32_to_cpu(arg.mem_reqs[i]->num_units);
unit_size = __le32_to_cpu(arg.mem_reqs[i]->unit_size);
num_unit_info = __le32_to_cpu(arg.mem_reqs[i]->num_unit_info);
if (num_unit_info & NUM_UNITS_IS_NUM_PEERS)
/* number of units to allocate is number of
......@@ -2316,7 +2545,7 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi mem_req_id %d num_units %d num_unit_info %d unit size %d actual units %d\n",
req_id,
__le32_to_cpu(ev->mem_reqs[i].num_units),
__le32_to_cpu(arg.mem_reqs[i]->num_units),
num_unit_info,
unit_size,
num_units);
......@@ -2327,23 +2556,23 @@ static void ath10k_wmi_10x_service_ready_event_rx(struct ath10k *ar,
return;
}
exit:
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi event service ready sw_ver 0x%08x abi_ver %u phy_cap 0x%08x ht_cap 0x%08x vht_cap 0x%08x vht_supp_msc 0x%08x sys_cap_info 0x%08x mem_reqs %u num_rf_chains %u\n",
__le32_to_cpu(ev->sw_version),
__le32_to_cpu(ev->abi_version),
__le32_to_cpu(ev->phy_capability),
__le32_to_cpu(ev->ht_cap_info),
__le32_to_cpu(ev->vht_cap_info),
__le32_to_cpu(ev->vht_supp_mcs),
__le32_to_cpu(ev->sys_cap_info),
__le32_to_cpu(ev->num_mem_reqs),
__le32_to_cpu(ev->num_rf_chains));
"wmi event service ready min_tx_power 0x%08x max_tx_power 0x%08x ht_cap 0x%08x vht_cap 0x%08x sw_ver0 0x%08x sw_ver1 0x%08x phy_capab 0x%08x num_rf_chains 0x%08x eeprom_rd 0x%08x num_mem_reqs 0x%08x\n",
__le32_to_cpu(arg.min_tx_power),
__le32_to_cpu(arg.max_tx_power),
__le32_to_cpu(arg.ht_cap),
__le32_to_cpu(arg.vht_cap),
__le32_to_cpu(arg.sw_ver0),
__le32_to_cpu(arg.sw_ver1),
__le32_to_cpu(arg.phy_capab),
__le32_to_cpu(arg.num_rf_chains),
__le32_to_cpu(arg.eeprom_rd),
__le32_to_cpu(arg.num_mem_reqs));
complete(&ar->wmi.service_ready);
}
static int ath10k_wmi_ready_event_rx(struct ath10k *ar, struct sk_buff *skb)
static int ath10k_wmi_event_ready(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_ready_event *ev = (struct wmi_ready_event *)skb->data;
......@@ -2466,10 +2695,10 @@ static void ath10k_wmi_main_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_install_key_complete(ar, skb);
break;
case WMI_SERVICE_READY_EVENTID:
ath10k_wmi_service_ready_event_rx(ar, skb);
ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_READY_EVENTID:
ath10k_wmi_ready_event_rx(ar, skb);
ath10k_wmi_event_ready(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
......@@ -2586,10 +2815,10 @@ static void ath10k_wmi_10x_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_resume_req(ar, skb);
break;
case WMI_10X_SERVICE_READY_EVENTID:
ath10k_wmi_10x_service_ready_event_rx(ar, skb);
ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_10X_READY_EVENTID:
ath10k_wmi_ready_event_rx(ar, skb);
ath10k_wmi_event_ready(ar, skb);
break;
case WMI_10X_PDEV_UTF_EVENTID:
/* ignore utf events */
......@@ -2697,10 +2926,10 @@ static void ath10k_wmi_10_2_process_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_wmi_event_vdev_resume_req(ar, skb);
break;
case WMI_10_2_SERVICE_READY_EVENTID:
ath10k_wmi_10x_service_ready_event_rx(ar, skb);
ath10k_wmi_event_service_ready(ar, skb);
break;
case WMI_10_2_READY_EVENTID:
ath10k_wmi_ready_event_rx(ar, skb);
ath10k_wmi_event_ready(ar, skb);
break;
case WMI_10_2_RTT_KEEPALIVE_EVENTID:
case WMI_10_2_GPIO_INPUT_EVENTID:
......@@ -2732,45 +2961,6 @@ static void ath10k_wmi_process_rx(struct ath10k *ar, struct sk_buff *skb)
}
}
/* WMI Initialization functions */
int ath10k_wmi_attach(struct ath10k *ar)
{
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
ar->wmi.cmd = &wmi_10_2_cmd_map;
else
ar->wmi.cmd = &wmi_10x_cmd_map;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
} else {
ar->wmi.cmd = &wmi_cmd_map;
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
}
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
int i;
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_free_coherent(ar->dev,
ar->wmi.mem_chunks[i].len,
ar->wmi.mem_chunks[i].vaddr,
ar->wmi.mem_chunks[i].paddr);
}
ar->wmi.num_mem_chunks = 0;
}
int ath10k_wmi_connect(struct ath10k *ar)
{
int status;
......@@ -2865,42 +3055,6 @@ int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
ctl2g, ctl5g);
}
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *arg)
{
struct wmi_set_channel_cmd *cmd;
struct sk_buff *skb;
u32 ch_flags = 0;
if (arg->passive)
return -EINVAL;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
if (arg->chan_radar)
ch_flags |= WMI_CHAN_FLAG_DFS;
cmd = (struct wmi_set_channel_cmd *)skb->data;
cmd->chan.mhz = __cpu_to_le32(arg->freq);
cmd->chan.band_center_freq1 = __cpu_to_le32(arg->freq);
cmd->chan.mode = arg->mode;
cmd->chan.flags |= __cpu_to_le32(ch_flags);
cmd->chan.min_power = arg->min_power;
cmd->chan.max_power = arg->max_power;
cmd->chan.reg_power = arg->max_reg_power;
cmd->chan.reg_classid = arg->reg_class_id;
cmd->chan.antenna_max = arg->max_antenna_gain;
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi set channel mode %d freq %d\n",
arg->mode, arg->freq);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_set_channel_cmdid);
}
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt)
{
struct wmi_pdev_suspend_cmd *cmd;
......@@ -2951,13 +3105,34 @@ int ath10k_wmi_pdev_set_param(struct ath10k *ar, u32 id, u32 value)
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->pdev_set_param_cmdid);
}
static void ath10k_wmi_put_host_mem_chunks(struct ath10k *ar,
struct wmi_host_mem_chunks *chunks)
{
struct host_memory_chunk *chunk;
int i;
chunks->count = __cpu_to_le32(ar->wmi.num_mem_chunks);
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
chunk = &chunks->items[i];
chunk->ptr = __cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
chunk->size = __cpu_to_le32(ar->wmi.mem_chunks[i].len);
chunk->req_id = __cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%llx\n",
i,
ar->wmi.mem_chunks[i].len,
(unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
}
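/* Editorial note, not part of the commit: the helper above assumes the
 * caller sized its command buffer so chunks->items[] can hold
 * ar->wmi.num_mem_chunks entries; struct wmi_host_mem_chunks declares a
 * single item only because some firmware revisions expect at least one
 * chunk even when the count is zero (see wmi.h later in this diff).
 */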
static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
{
struct wmi_init_cmd *cmd;
struct sk_buff *buf;
struct wmi_resource_config config = {};
u32 len, val;
int i;
config.num_vdevs = __cpu_to_le32(TARGET_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_NUM_PEERS + TARGET_NUM_VDEVS);
......@@ -3019,32 +3194,8 @@ static int ath10k_wmi_main_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd *)buf->data;
if (ar->wmi.num_mem_chunks == 0) {
cmd->num_host_mem_chunks = 0;
goto out;
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
cmd->host_mem_chunks[i].ptr =
__cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
cmd->host_mem_chunks[i].size =
__cpu_to_le32(ar->wmi.mem_chunks[i].len);
cmd->host_mem_chunks[i].req_id =
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%llx\n",
i,
ar->wmi.mem_chunks[i].len,
(unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
......@@ -3056,7 +3207,6 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
u32 len, val;
int i;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
......@@ -3110,32 +3260,8 @@ static int ath10k_wmi_10x_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10x *)buf->data;
if (ar->wmi.num_mem_chunks == 0) {
cmd->num_host_mem_chunks = 0;
goto out;
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
cmd->host_mem_chunks[i].ptr =
__cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
cmd->host_mem_chunks[i].size =
__cpu_to_le32(ar->wmi.mem_chunks[i].len);
cmd->host_mem_chunks[i].req_id =
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%llx\n",
i,
ar->wmi.mem_chunks[i].len,
(unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
out:
memcpy(&cmd->resource_config, &config, sizeof(config));
ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10x\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
......@@ -3147,7 +3273,6 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
struct sk_buff *buf;
struct wmi_resource_config_10x config = {};
u32 len, val;
int i;
config.num_vdevs = __cpu_to_le32(TARGET_10X_NUM_VDEVS);
config.num_peers = __cpu_to_le32(TARGET_10X_NUM_PEERS);
......@@ -3201,32 +3326,8 @@ static int ath10k_wmi_10_2_cmd_init(struct ath10k *ar)
cmd = (struct wmi_init_cmd_10_2 *)buf->data;
if (ar->wmi.num_mem_chunks == 0) {
cmd->num_host_mem_chunks = 0;
goto out;
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi sending %d memory chunks info.\n",
ar->wmi.num_mem_chunks);
cmd->num_host_mem_chunks = __cpu_to_le32(ar->wmi.num_mem_chunks);
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
cmd->host_mem_chunks[i].ptr =
__cpu_to_le32(ar->wmi.mem_chunks[i].paddr);
cmd->host_mem_chunks[i].size =
__cpu_to_le32(ar->wmi.mem_chunks[i].len);
cmd->host_mem_chunks[i].req_id =
__cpu_to_le32(ar->wmi.mem_chunks[i].req_id);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi chunk %d len %d requested, addr 0x%llx\n",
i,
ar->wmi.mem_chunks[i].len,
(unsigned long long)ar->wmi.mem_chunks[i].paddr);
}
out:
memcpy(&cmd->resource_config.common, &config, sizeof(config));
ath10k_wmi_put_host_mem_chunks(ar, &cmd->mem_chunks);
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi init 10.2\n");
return ath10k_wmi_cmd_send(ar, buf, ar->wmi.cmd->init_cmdid);
......@@ -3248,52 +3349,50 @@ int ath10k_wmi_cmd_init(struct ath10k *ar)
return ret;
}
static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
const struct wmi_start_scan_arg *arg)
static int ath10k_wmi_start_scan_verify(const struct wmi_start_scan_arg *arg)
{
int len;
if (arg->ie_len && !arg->ie)
return -EINVAL;
if (arg->n_channels && !arg->channels)
return -EINVAL;
if (arg->n_ssids && !arg->ssids)
return -EINVAL;
if (arg->n_bssids && !arg->bssids)
return -EINVAL;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
len = sizeof(struct wmi_start_scan_cmd_10x);
else
len = sizeof(struct wmi_start_scan_cmd);
if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
return -EINVAL;
if (arg->n_channels > ARRAY_SIZE(arg->channels))
return -EINVAL;
if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
return -EINVAL;
if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
return -EINVAL;
if (arg->ie_len) {
if (!arg->ie)
return -EINVAL;
if (arg->ie_len > WLAN_SCAN_PARAMS_MAX_IE_LEN)
return -EINVAL;
return 0;
}
static size_t
ath10k_wmi_start_scan_tlvs_len(const struct wmi_start_scan_arg *arg)
{
int len = 0;
if (arg->ie_len) {
len += sizeof(struct wmi_ie_data);
len += roundup(arg->ie_len, 4);
}
if (arg->n_channels) {
if (!arg->channels)
return -EINVAL;
if (arg->n_channels > ARRAY_SIZE(arg->channels))
return -EINVAL;
len += sizeof(struct wmi_chan_list);
len += sizeof(__le32) * arg->n_channels;
}
if (arg->n_ssids) {
if (!arg->ssids)
return -EINVAL;
if (arg->n_ssids > WLAN_SCAN_PARAMS_MAX_SSID)
return -EINVAL;
len += sizeof(struct wmi_ssid_list);
len += sizeof(struct wmi_ssid) * arg->n_ssids;
}
if (arg->n_bssids) {
if (!arg->bssids)
return -EINVAL;
if (arg->n_bssids > WLAN_SCAN_PARAMS_MAX_BSSID)
return -EINVAL;
len += sizeof(struct wmi_bssid_list);
len += sizeof(struct wmi_mac_addr) * arg->n_bssids;
}
......@@ -3301,28 +3400,12 @@ static int ath10k_wmi_start_scan_calc_len(struct ath10k *ar,
return len;
}
int ath10k_wmi_start_scan(struct ath10k *ar,
const struct wmi_start_scan_arg *arg)
static void
ath10k_wmi_put_start_scan_common(struct wmi_start_scan_common *cmn,
const struct wmi_start_scan_arg *arg)
{
struct wmi_start_scan_cmd *cmd;
struct sk_buff *skb;
struct wmi_ie_data *ie;
struct wmi_chan_list *channels;
struct wmi_ssid_list *ssids;
struct wmi_bssid_list *bssids;
u32 scan_id;
u32 scan_req_id;
int off;
int len = 0;
int i;
len = ath10k_wmi_start_scan_calc_len(ar, arg);
if (len < 0)
return len; /* len contains error code here */
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
scan_id = WMI_HOST_SCAN_REQ_ID_PREFIX;
scan_id |= arg->scan_id;
......@@ -3330,35 +3413,36 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
scan_req_id = WMI_HOST_SCAN_REQUESTOR_ID_PREFIX;
scan_req_id |= arg->scan_req_id;
cmd = (struct wmi_start_scan_cmd *)skb->data;
cmd->scan_id = __cpu_to_le32(scan_id);
cmd->scan_req_id = __cpu_to_le32(scan_req_id);
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
cmd->scan_priority = __cpu_to_le32(arg->scan_priority);
cmd->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
cmd->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
cmd->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
cmd->min_rest_time = __cpu_to_le32(arg->min_rest_time);
cmd->max_rest_time = __cpu_to_le32(arg->max_rest_time);
cmd->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
cmd->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
cmd->idle_time = __cpu_to_le32(arg->idle_time);
cmd->max_scan_time = __cpu_to_le32(arg->max_scan_time);
cmd->probe_delay = __cpu_to_le32(arg->probe_delay);
cmd->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
/* TLV list starts after fields included in the struct */
/* There's just one field that differs between the two start_scan
* structures - burst_duration, which we are not using btw,
* no point to make the split here, just shift the buffer to fit with
* the given FW */
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
off = sizeof(struct wmi_start_scan_cmd_10x);
else
off = sizeof(struct wmi_start_scan_cmd);
cmn->scan_id = __cpu_to_le32(scan_id);
cmn->scan_req_id = __cpu_to_le32(scan_req_id);
cmn->vdev_id = __cpu_to_le32(arg->vdev_id);
cmn->scan_priority = __cpu_to_le32(arg->scan_priority);
cmn->notify_scan_events = __cpu_to_le32(arg->notify_scan_events);
cmn->dwell_time_active = __cpu_to_le32(arg->dwell_time_active);
cmn->dwell_time_passive = __cpu_to_le32(arg->dwell_time_passive);
cmn->min_rest_time = __cpu_to_le32(arg->min_rest_time);
cmn->max_rest_time = __cpu_to_le32(arg->max_rest_time);
cmn->repeat_probe_time = __cpu_to_le32(arg->repeat_probe_time);
cmn->probe_spacing_time = __cpu_to_le32(arg->probe_spacing_time);
cmn->idle_time = __cpu_to_le32(arg->idle_time);
cmn->max_scan_time = __cpu_to_le32(arg->max_scan_time);
cmn->probe_delay = __cpu_to_le32(arg->probe_delay);
cmn->scan_ctrl_flags = __cpu_to_le32(arg->scan_ctrl_flags);
}
static void
ath10k_wmi_put_start_scan_tlvs(struct wmi_start_scan_tlvs *tlvs,
const struct wmi_start_scan_arg *arg)
{
struct wmi_ie_data *ie;
struct wmi_chan_list *channels;
struct wmi_ssid_list *ssids;
struct wmi_bssid_list *bssids;
void *ptr = tlvs->tlvs;
int i;
if (arg->n_channels) {
channels = (void *)skb->data + off;
channels = ptr;
channels->tag = __cpu_to_le32(WMI_CHAN_LIST_TAG);
channels->num_chan = __cpu_to_le32(arg->n_channels);
......@@ -3366,12 +3450,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
channels->channel_list[i].freq =
__cpu_to_le16(arg->channels[i]);
off += sizeof(*channels);
off += sizeof(__le32) * arg->n_channels;
ptr += sizeof(*channels);
ptr += sizeof(__le32) * arg->n_channels;
}
if (arg->n_ssids) {
ssids = (void *)skb->data + off;
ssids = ptr;
ssids->tag = __cpu_to_le32(WMI_SSID_LIST_TAG);
ssids->num_ssids = __cpu_to_le32(arg->n_ssids);
......@@ -3383,12 +3467,12 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
arg->ssids[i].len);
}
off += sizeof(*ssids);
off += sizeof(struct wmi_ssid) * arg->n_ssids;
ptr += sizeof(*ssids);
ptr += sizeof(struct wmi_ssid) * arg->n_ssids;
}
if (arg->n_bssids) {
bssids = (void *)skb->data + off;
bssids = ptr;
bssids->tag = __cpu_to_le32(WMI_BSSID_LIST_TAG);
bssids->num_bssid = __cpu_to_le32(arg->n_bssids);
......@@ -3397,23 +3481,57 @@ int ath10k_wmi_start_scan(struct ath10k *ar,
arg->bssids[i].bssid,
ETH_ALEN);
off += sizeof(*bssids);
off += sizeof(struct wmi_mac_addr) * arg->n_bssids;
ptr += sizeof(*bssids);
ptr += sizeof(struct wmi_mac_addr) * arg->n_bssids;
}
if (arg->ie_len) {
ie = (void *)skb->data + off;
ie = ptr;
ie->tag = __cpu_to_le32(WMI_IE_TAG);
ie->ie_len = __cpu_to_le32(arg->ie_len);
memcpy(ie->ie_data, arg->ie, arg->ie_len);
off += sizeof(*ie);
off += roundup(arg->ie_len, 4);
ptr += sizeof(*ie);
ptr += roundup(arg->ie_len, 4);
}
}
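/* Editorial note, not part of the commit: the sections above are written
 * in the order channels, ssids, bssids, ie, and each optional section
 * contributes exactly the bytes accounted for by
 * ath10k_wmi_start_scan_tlvs_len(), so the allocated command buffer is
 * filled with no slack.
 */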
if (off != skb->len) {
dev_kfree_skb(skb);
return -EINVAL;
int ath10k_wmi_start_scan(struct ath10k *ar,
const struct wmi_start_scan_arg *arg)
{
struct sk_buff *skb;
size_t len;
int ret;
ret = ath10k_wmi_start_scan_verify(arg);
if (ret)
return ret;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features))
len = sizeof(struct wmi_10x_start_scan_cmd) +
ath10k_wmi_start_scan_tlvs_len(arg);
else
len = sizeof(struct wmi_start_scan_cmd) +
ath10k_wmi_start_scan_tlvs_len(arg);
skb = ath10k_wmi_alloc_skb(ar, len);
if (!skb)
return -ENOMEM;
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
struct wmi_10x_start_scan_cmd *cmd;
cmd = (struct wmi_10x_start_scan_cmd *)skb->data;
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
} else {
struct wmi_start_scan_cmd *cmd;
cmd = (struct wmi_start_scan_cmd *)skb->data;
cmd->burst_duration_ms = __cpu_to_le32(0);
ath10k_wmi_put_start_scan_common(&cmd->common, arg);
ath10k_wmi_put_start_scan_tlvs(&cmd->tlvs, arg);
}
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi start scan\n");
......@@ -3532,7 +3650,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
struct sk_buff *skb;
const char *cmdname;
u32 flags = 0;
u32 ch_flags = 0;
if (cmd_id != ar->wmi.cmd->vdev_start_request_cmdid &&
cmd_id != ar->wmi.cmd->vdev_restart_request_cmdid)
......@@ -3559,8 +3676,6 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
flags |= WMI_VDEV_START_HIDDEN_SSID;
if (arg->pmf_enabled)
flags |= WMI_VDEV_START_PMF_ENABLED;
if (arg->channel.chan_radar)
ch_flags |= WMI_CHAN_FLAG_DFS;
cmd = (struct wmi_vdev_start_request_cmd *)skb->data;
cmd->vdev_id = __cpu_to_le32(arg->vdev_id);
......@@ -3576,18 +3691,7 @@ ath10k_wmi_vdev_start_restart(struct ath10k *ar,
memcpy(cmd->ssid.ssid, arg->ssid, arg->ssid_len);
}
cmd->chan.mhz = __cpu_to_le32(arg->channel.freq);
cmd->chan.band_center_freq1 =
__cpu_to_le32(arg->channel.band_center_freq1);
cmd->chan.mode = arg->channel.mode;
cmd->chan.flags |= __cpu_to_le32(ch_flags);
cmd->chan.min_power = arg->channel.min_power;
cmd->chan.max_power = arg->channel.max_power;
cmd->chan.reg_power = arg->channel.max_reg_power;
cmd->chan.reg_classid = arg->channel.reg_class_id;
cmd->chan.antenna_max = arg->channel.max_antenna_gain;
ath10k_wmi_put_wmi_channel(&cmd->chan, &arg->channel);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi vdev %s id 0x%x flags: 0x%0X, freq %d, mode %d, ch_flags: 0x%0X, max_power: %d\n",
......@@ -3968,35 +4072,10 @@ int ath10k_wmi_scan_chan_list(struct ath10k *ar,
cmd->num_scan_chans = __cpu_to_le32(arg->n_channels);
for (i = 0; i < arg->n_channels; i++) {
u32 flags = 0;
ch = &arg->channels[i];
ci = &cmd->chan_info[i];
if (ch->passive)
flags |= WMI_CHAN_FLAG_PASSIVE;
if (ch->allow_ibss)
flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
if (ch->allow_ht)
flags |= WMI_CHAN_FLAG_ALLOW_HT;
if (ch->allow_vht)
flags |= WMI_CHAN_FLAG_ALLOW_VHT;
if (ch->ht40plus)
flags |= WMI_CHAN_FLAG_HT40_PLUS;
if (ch->chan_radar)
flags |= WMI_CHAN_FLAG_DFS;
ci->mhz = __cpu_to_le32(ch->freq);
ci->band_center_freq1 = __cpu_to_le32(ch->freq);
ci->band_center_freq2 = 0;
ci->min_power = ch->min_power;
ci->max_power = ch->max_power;
ci->reg_power = ch->max_reg_power;
ci->antenna_max = ch->max_antenna_gain;
/* mode & flags share storage */
ci->mode = ch->mode;
ci->flags |= __cpu_to_le32(flags);
ath10k_wmi_put_wmi_channel(ci, ch);
}
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->scan_chan_list_cmdid);
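/*
 * Editorial sketch, not part of this hunk: ath10k_wmi_put_wmi_channel()
 * is added elsewhere in the commit.  A body consistent with the inline
 * code removed above and in ath10k_wmi_vdev_start_restart() would look
 * roughly like this (field names taken from the removed lines; the
 * function name is hypothetical).
 */
static void example_put_wmi_channel(struct wmi_channel *ch,
				    const struct wmi_channel_arg *arg)
{
	u32 flags = 0;

	if (arg->passive)
		flags |= WMI_CHAN_FLAG_PASSIVE;
	if (arg->allow_ibss)
		flags |= WMI_CHAN_FLAG_ADHOC_ALLOWED;
	if (arg->allow_ht)
		flags |= WMI_CHAN_FLAG_ALLOW_HT;
	if (arg->allow_vht)
		flags |= WMI_CHAN_FLAG_ALLOW_VHT;
	if (arg->ht40plus)
		flags |= WMI_CHAN_FLAG_HT40_PLUS;
	if (arg->chan_radar)
		flags |= WMI_CHAN_FLAG_DFS;

	ch->mhz = __cpu_to_le32(arg->freq);
	ch->band_center_freq1 = __cpu_to_le32(arg->band_center_freq1);
	ch->band_center_freq2 = 0;
	ch->min_power = arg->min_power;
	ch->max_power = arg->max_power;
	ch->reg_power = arg->max_reg_power;
	ch->reg_classid = arg->reg_class_id;
	ch->antenna_max = arg->max_antenna_gain;
	/* mode & flags share storage in struct wmi_channel */
	ch->mode = arg->mode;
	ch->flags |= __cpu_to_le32(flags);
}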
......@@ -4267,3 +4346,74 @@ int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable)
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->dbglog_cfg_cmdid);
}
int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_bitmap)
{
struct wmi_pdev_pktlog_enable_cmd *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return -ENOMEM;
ev_bitmap &= ATH10K_PKTLOG_ANY;
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi enable pktlog filter:%x\n", ev_bitmap);
cmd = (struct wmi_pdev_pktlog_enable_cmd *)skb->data;
cmd->ev_bitmap = __cpu_to_le32(ev_bitmap);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_pktlog_enable_cmdid);
}
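/* Editorial note, not part of the commit: the bitmap is masked with
 * ATH10K_PKTLOG_ANY above, so filter bits the driver does not know about
 * are never forwarded to the firmware.
 */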
int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar)
{
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, 0);
if (!skb)
return -ENOMEM;
ath10k_dbg(ar, ATH10K_DBG_WMI, "wmi disable pktlog\n");
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_pktlog_disable_cmdid);
}
int ath10k_wmi_attach(struct ath10k *ar)
{
if (test_bit(ATH10K_FW_FEATURE_WMI_10X, ar->fw_features)) {
if (test_bit(ATH10K_FW_FEATURE_WMI_10_2, ar->fw_features))
ar->wmi.cmd = &wmi_10_2_cmd_map;
else
ar->wmi.cmd = &wmi_10x_cmd_map;
ar->wmi.vdev_param = &wmi_10x_vdev_param_map;
ar->wmi.pdev_param = &wmi_10x_pdev_param_map;
} else {
ar->wmi.cmd = &wmi_cmd_map;
ar->wmi.vdev_param = &wmi_vdev_param_map;
ar->wmi.pdev_param = &wmi_pdev_param_map;
}
init_completion(&ar->wmi.service_ready);
init_completion(&ar->wmi.unified_ready);
init_waitqueue_head(&ar->wmi.tx_credits_wq);
return 0;
}
void ath10k_wmi_detach(struct ath10k *ar)
{
int i;
/* free the host memory chunks requested by firmware */
for (i = 0; i < ar->wmi.num_mem_chunks; i++) {
dma_free_coherent(ar->dev,
ar->wmi.mem_chunks[i].len,
ar->wmi.mem_chunks[i].vaddr,
ar->wmi.mem_chunks[i].paddr);
}
ar->wmi.num_mem_chunks = 0;
}
......@@ -1428,11 +1428,11 @@ struct wmi_service_ready_event {
* where FW can access this memory directly (or) by DMA.
*/
__le32 num_mem_reqs;
struct wlan_host_mem_req mem_reqs[1];
struct wlan_host_mem_req mem_reqs[0];
} __packed;
/* This is the definition from 10.X firmware branch */
struct wmi_service_ready_event_10x {
struct wmi_10x_service_ready_event {
__le32 sw_version;
__le32 abi_version;
......@@ -1467,7 +1467,7 @@ struct wmi_service_ready_event_10x {
*/
__le32 num_mem_reqs;
struct wlan_host_mem_req mem_reqs[1];
struct wlan_host_mem_req mem_reqs[0];
} __packed;
#define WMI_SERVICE_READY_TIMEOUT_HZ (5*HZ)
......@@ -1883,38 +1883,26 @@ struct host_memory_chunk {
__le32 size;
} __packed;
struct wmi_host_mem_chunks {
__le32 count;
/* some fw revisions require at least 1 chunk regardless of count */
struct host_memory_chunk items[1];
} __packed;
struct wmi_init_cmd {
struct wmi_resource_config resource_config;
__le32 num_host_mem_chunks;
/*
* variable number of host memory chunks.
* This should be the last element in the structure
*/
struct host_memory_chunk host_mem_chunks[1];
struct wmi_host_mem_chunks mem_chunks;
} __packed;
/* _10x stucture is from 10.X FW API */
struct wmi_init_cmd_10x {
struct wmi_resource_config_10x resource_config;
__le32 num_host_mem_chunks;
/*
* variable number of host memory chunks.
* This should be the last element in the structure
*/
struct host_memory_chunk host_mem_chunks[1];
struct wmi_host_mem_chunks mem_chunks;
} __packed;
struct wmi_init_cmd_10_2 {
struct wmi_resource_config_10_2 resource_config;
__le32 num_host_mem_chunks;
/*
* variable number of host memory chunks.
* This should be the last element in the structure
*/
struct host_memory_chunk host_mem_chunks[1];
struct wmi_host_mem_chunks mem_chunks;
} __packed;
struct wmi_chan_list_entry {
......@@ -1974,7 +1962,7 @@ enum wmi_scan_priority {
WMI_SCAN_PRIORITY_COUNT /* number of priorities supported */
};
struct wmi_start_scan_cmd {
struct wmi_start_scan_common {
/* Scan ID */
__le32 scan_id;
/* Scan requestor ID */
......@@ -2032,95 +2020,25 @@ struct wmi_start_scan_cmd {
__le32 probe_delay;
/* Scan control flags */
__le32 scan_ctrl_flags;
/* Burst duration time in msecs */
__le32 burst_duration;
/*
* TLV (tag length value) parameters follow the scan_cmd structure.
* TLV can contain channel list, bssid list, ssid list and
* ie. The TLV tags are defined above;
*/
} __packed;
/* This is the definition from 10.X firmware branch */
struct wmi_start_scan_cmd_10x {
/* Scan ID */
__le32 scan_id;
/* Scan requestor ID */
__le32 scan_req_id;
/* VDEV id(interface) that is requesting scan */
__le32 vdev_id;
/* Scan Priority, input to scan scheduler */
__le32 scan_priority;
/* Scan events subscription */
__le32 notify_scan_events;
/* dwell time in msec on active channels */
__le32 dwell_time_active;
/* dwell time in msec on passive channels */
__le32 dwell_time_passive;
/*
* min time in msec on the BSS channel, only valid if at least one
* VDEV is active
*/
__le32 min_rest_time;
/*
* max rest time in msec on the BSS channel,only valid if at least
* one VDEV is active
*/
/*
* the scanner will rest on the bss channel at least min_rest_time.
* After min_rest_time the scanner will start checking for tx/rx
* activity on all VDEVs. If there is no activity the scanner will
* switch to off channel. If there is activity the scanner will leave
* the radio on the bss channel until max_rest_time expires. At
* max_rest_time the scanner will switch to off channel irrespective of
* activity. Activity is determined by the idle_time parameter.
*/
__le32 max_rest_time;
/*
* time before sending next set of probe requests.
* The scanner keeps repeating probe requests transmission with
* period specified by repeat_probe_time.
* The number of probe requests specified depends on the ssid_list
* and bssid_list
*/
__le32 repeat_probe_time;
/* time in msec between 2 consecutive probe requests within a set. */
__le32 probe_spacing_time;
/*
* data inactivity time in msec on bss channel that will be used by
* scanner for measuring the inactivity.
*/
__le32 idle_time;
/* maximum time in msec allowed for scan */
__le32 max_scan_time;
/*
* delay in msec before sending first probe request after switching
* to a channel
struct wmi_start_scan_tlvs {
/* TLV parameters. These include channel list, ssid list, bssid list,
* extra ies.
*/
__le32 probe_delay;
u8 tlvs[0];
} __packed;
/* Scan control flags */
__le32 scan_ctrl_flags;
struct wmi_start_scan_cmd {
struct wmi_start_scan_common common;
__le32 burst_duration_ms;
struct wmi_start_scan_tlvs tlvs;
} __packed;
/*
* TLV (tag length value) parameters follow the scan_cmd structure.
* TLV can contain channel list, bssid list, ssid list and
* ie. The TLV tags are defined above;
*/
/* This is the definition from 10.X firmware branch */
struct wmi_10x_start_scan_cmd {
struct wmi_start_scan_common common;
struct wmi_start_scan_tlvs tlvs;
} __packed;
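/* Editorial note, not part of the commit: with the zero-length tlvs[]
 * array and __packed, the only layout difference between the two
 * commands is the extra burst_duration_ms word, i.e.
 *
 *   sizeof(struct wmi_start_scan_cmd) ==
 *       sizeof(struct wmi_10x_start_scan_cmd) + sizeof(__le32)
 *
 * which is why ath10k_wmi_start_scan() only has to pick the matching
 * sizeof() before appending the shared TLVs.
 */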
struct wmi_ssid_arg {
......@@ -2306,94 +2224,25 @@ struct wmi_mgmt_rx_event_v2 {
#define PHY_ERROR_FALSE_RADAR_EXT 0x24
#define PHY_ERROR_RADAR 0x05
struct wmi_single_phyerr_rx_hdr {
/* TSF timestamp */
struct wmi_phyerr {
__le32 tsf_timestamp;
/*
* Current freq1, freq2
*
* [7:0]: freq1[lo]
* [15:8] : freq1[hi]
* [23:16]: freq2[lo]
* [31:24]: freq2[hi]
*/
__le16 freq1;
__le16 freq2;
/*
* Combined RSSI over all chains and channel width for this PHY error
*
* [7:0]: RSSI combined
* [15:8]: Channel width (MHz)
* [23:16]: PHY error code
* [24:16]: reserved (future use)
*/
u8 rssi_combined;
u8 chan_width_mhz;
u8 phy_err_code;
u8 rsvd0;
/*
* RSSI on chain 0 through 3
*
* This is formatted the same as the PPDU_START RX descriptor
* field:
*
* [7:0]: pri20
* [15:8]: sec20
* [23:16]: sec40
* [31:24]: sec80
*/
__le32 rssi_chain0;
__le32 rssi_chain1;
__le32 rssi_chain2;
__le32 rssi_chain3;
/*
* Last calibrated NF value for chain 0 through 3
*
* nf_list_1:
*
* + [15:0] - chain 0
* + [31:16] - chain 1
*
* nf_list_2:
*
* + [15:0] - chain 2
* + [31:16] - chain 3
*/
__le32 nf_list_1;
__le32 nf_list_2;
/* Length of the frame */
__le32 rssi_chains[4];
__le16 nf_chains[4];
__le32 buf_len;
u8 buf[0];
} __packed;
struct wmi_single_phyerr_rx_event {
/* Phy error event header */
struct wmi_single_phyerr_rx_hdr hdr;
/* frame buffer */
u8 bufp[0];
} __packed;
struct wmi_comb_phyerr_rx_hdr {
/* Phy error phy error count */
__le32 num_phyerr_events;
struct wmi_phyerr_event {
__le32 num_phyerrs;
__le32 tsf_l32;
__le32 tsf_u32;
} __packed;
struct wmi_comb_phyerr_rx_event {
/* Phy error phy error count */
struct wmi_comb_phyerr_rx_hdr hdr;
/*
* frame buffer - contains multiple payloads in the order:
* header - payload, header - payload...
* (The header is of type: wmi_single_phyerr_rx_hdr)
*/
u8 bufp[0];
struct wmi_phyerr phyerrs[0];
} __packed;
#define PHYERR_TLV_SIG 0xBB
......@@ -2908,11 +2757,6 @@ enum wmi_tp_scale {
WMI_TP_SCALE_SIZE = 5, /* max num of enum */
};
struct wmi_set_channel_cmd {
/* channel (only frequency and mode info are used) */
struct wmi_channel chan;
} __packed;
struct wmi_pdev_chanlist_update_event {
/* number of channels */
__le32 num_chan;
......@@ -2943,6 +2787,10 @@ struct wmi_pdev_set_channel_cmd {
struct wmi_channel chan;
} __packed;
struct wmi_pdev_pktlog_enable_cmd {
__le32 ev_bitmap;
} __packed;
/* Customize the DSCP (bit) to TID (0-7) mapping for QOS */
#define WMI_DSCP_MAP_MAX (64)
struct wmi_pdev_set_dscp_tid_map_cmd {
......@@ -3177,7 +3025,7 @@ struct wmi_stats_event {
* PDEV statistics
* TODO: add all PDEV stats here
*/
struct wmi_pdev_stats_old {
struct wmi_pdev_stats {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
......@@ -3188,15 +3036,8 @@ struct wmi_pdev_stats_old {
struct wal_dbg_stats wal; /* WAL dbg stats */
} __packed;
struct wmi_pdev_stats_10x {
__le32 chan_nf; /* Channel noise floor */
__le32 tx_frame_count; /* TX frame count */
__le32 rx_frame_count; /* RX frame count */
__le32 rx_clear_count; /* rx clear count */
__le32 cycle_count; /* cycle count */
__le32 phy_err_count; /* Phy error count */
__le32 chan_tx_pwr; /* channel tx power */
struct wal_dbg_stats wal; /* WAL dbg stats */
struct wmi_10x_pdev_stats {
struct wmi_pdev_stats old;
__le32 ack_rx_bad;
__le32 rts_bad;
__le32 rts_good;
......@@ -3217,16 +3058,14 @@ struct wmi_vdev_stats {
* peer statistics.
* TODO: add more stats
*/
struct wmi_peer_stats_old {
struct wmi_peer_stats {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
} __packed;
struct wmi_peer_stats_10x {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_tx_rate;
struct wmi_10x_peer_stats {
struct wmi_peer_stats old;
__le32 peer_rx_rate;
} __packed;
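/* Editorial note, not part of the commit: the 10.x stats variants above
 * now embed the common struct as their leading member ("old"), so code
 * that only needs the common fields can read either layout through a
 * struct wmi_pdev_stats / wmi_peer_stats pointer.
 */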
......@@ -4719,8 +4558,26 @@ struct wmi_dbglog_cfg_cmd {
/* By default disable power save for IBSS */
#define ATH10K_DEFAULT_ATIM 0
#define WMI_MAX_MEM_REQS 16
struct wmi_svc_rdy_ev_arg {
__le32 min_tx_power;
__le32 max_tx_power;
__le32 ht_cap;
__le32 vht_cap;
__le32 sw_ver0;
__le32 sw_ver1;
__le32 phy_capab;
__le32 num_rf_chains;
__le32 eeprom_rd;
__le32 num_mem_reqs;
const __le32 *service_map;
const struct wlan_host_mem_req *mem_reqs[WMI_MAX_MEM_REQS];
};
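/* Editorial note, not part of the commit: service_map and mem_reqs[]
 * point into the received event skb (the pull helpers store &ev->...),
 * so the parsed arg is only valid while that skb is held.
 */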
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats;
int ath10k_wmi_attach(struct ath10k *ar);
void ath10k_wmi_detach(struct ath10k *ar);
......@@ -4732,8 +4589,6 @@ int ath10k_wmi_connect(struct ath10k *ar);
struct sk_buff *ath10k_wmi_alloc_skb(struct ath10k *ar, u32 len);
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
int ath10k_wmi_pdev_set_channel(struct ath10k *ar,
const struct wmi_channel_arg *);
int ath10k_wmi_pdev_suspend_target(struct ath10k *ar, u32 suspend_opt);
int ath10k_wmi_pdev_resume_target(struct ath10k *ar);
int ath10k_wmi_pdev_set_regdomain(struct ath10k *ar, u16 rd, u16 rd2g,
......@@ -4794,5 +4649,9 @@ int ath10k_wmi_force_fw_hang(struct ath10k *ar,
enum wmi_force_fw_hang_type type, u32 delay_ms);
int ath10k_wmi_mgmt_tx(struct ath10k *ar, struct sk_buff *skb);
int ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable);
int ath10k_wmi_pull_fw_stats(struct ath10k *ar, struct sk_buff *skb,
struct ath10k_fw_stats *stats);
int ath10k_wmi_pdev_pktlog_enable(struct ath10k *ar, u32 ev_list);
int ath10k_wmi_pdev_pktlog_disable(struct ath10k *ar);
#endif /* _WMI_H_ */
......@@ -22,7 +22,7 @@
#define ATH6KL_MAX_IE 256
__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...);
/*
* Reflects the version of binary interface exposed by ATH6KL target
......
......@@ -37,76 +37,64 @@ struct ath6kl_fwlog_slot {
#define ATH6KL_FWLOG_VALID_MASK 0x1ffff
int ath6kl_printk(const char *level, const char *fmt, ...)
void ath6kl_printk(const char *level, const char *fmt, ...)
{
struct va_format vaf;
va_list args;
int rtn;
va_start(args, fmt);
vaf.fmt = fmt;
vaf.va = &args;
rtn = printk("%sath6kl: %pV", level, &vaf);
printk("%sath6kl: %pV", level, &vaf);
va_end(args);
return rtn;
}
EXPORT_SYMBOL(ath6kl_printk);
int ath6kl_info(const char *fmt, ...)
void ath6kl_info(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret;
va_start(args, fmt);
vaf.va = &args;
ret = ath6kl_printk(KERN_INFO, "%pV", &vaf);
ath6kl_printk(KERN_INFO, "%pV", &vaf);
trace_ath6kl_log_info(&vaf);
va_end(args);
return ret;
}
EXPORT_SYMBOL(ath6kl_info);
int ath6kl_err(const char *fmt, ...)
void ath6kl_err(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret;
va_start(args, fmt);
vaf.va = &args;
ret = ath6kl_printk(KERN_ERR, "%pV", &vaf);
ath6kl_printk(KERN_ERR, "%pV", &vaf);
trace_ath6kl_log_err(&vaf);
va_end(args);
return ret;
}
EXPORT_SYMBOL(ath6kl_err);
int ath6kl_warn(const char *fmt, ...)
void ath6kl_warn(const char *fmt, ...)
{
struct va_format vaf = {
.fmt = fmt,
};
va_list args;
int ret;
va_start(args, fmt);
vaf.va = &args;
ret = ath6kl_printk(KERN_WARNING, "%pV", &vaf);
ath6kl_printk(KERN_WARNING, "%pV", &vaf);
trace_ath6kl_log_warn(&vaf);
va_end(args);
return ret;
}
EXPORT_SYMBOL(ath6kl_warn);
......
......@@ -50,10 +50,10 @@ enum ATH6K_DEBUG_MASK {
};
extern unsigned int debug_mask;
__printf(2, 3) int ath6kl_printk(const char *level, const char *fmt, ...);
__printf(1, 2) int ath6kl_info(const char *fmt, ...);
__printf(1, 2) int ath6kl_err(const char *fmt, ...);
__printf(1, 2) int ath6kl_warn(const char *fmt, ...);
__printf(2, 3) void ath6kl_printk(const char *level, const char *fmt, ...);
__printf(1, 2) void ath6kl_info(const char *fmt, ...);
__printf(1, 2) void ath6kl_err(const char *fmt, ...);
__printf(1, 2) void ath6kl_warn(const char *fmt, ...);
enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
......@@ -81,10 +81,9 @@ int ath6kl_debug_init_fs(struct ath6kl *ar);
void ath6kl_debug_cleanup(struct ath6kl *ar);
#else
static inline int ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
const char *fmt, ...)
static inline void ath6kl_dbg(enum ATH6K_DEBUG_MASK dbg_mask,
const char *fmt, ...)
{
return 0;
}
static inline void ath6kl_dbg_dump(enum ATH6K_DEBUG_MASK mask,
......
......@@ -463,8 +463,11 @@ struct wil6210_priv {
#define ndev_to_wil(n) (wdev_to_wil(n->ieee80211_ptr))
#define wil_to_pcie_dev(i) (&i->pdev->dev)
__printf(2, 3)
void wil_dbg_trace(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
void wil_err(struct wil6210_priv *wil, const char *fmt, ...);
__printf(2, 3)
void wil_info(struct wil6210_priv *wil, const char *fmt, ...);
#define wil_dbg(wil, fmt, arg...) do { \
netdev_dbg(wil_to_ndev(wil), fmt, ##arg); \
......