Commit 8aaa878d authored by Kalle Valo's avatar Kalle Valo

Merge ath-next from ath.git

Major changes:

ath10k:

* add QCA9377 support
* fw_stats support for 10.4 firmware

ath6kl:

* report antenna configuration to user space
* implement ethtool stats
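A quick user-space sanity check for the ath6kl changes (assuming an ath6kl interface named wlan0): the new per-target counters should show up via "ethtool -S wlan0", and the reported antenna configuration as "Available Antennas" in the output of "iw phy".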
parents 845da6e5 13eff531
......@@ -274,7 +274,7 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
{
struct ath10k *ar = ce_state->ar;
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
struct ce_desc *desc, *sdesc;
struct ce_desc *desc, sdesc;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
unsigned int write_index = src_ring->write_index;
......@@ -294,7 +294,6 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
desc = CE_SRC_RING_TO_DESC(src_ring->base_addr_owner_space,
write_index);
sdesc = CE_SRC_RING_TO_DESC(src_ring->shadow_base, write_index);
desc_flags |= SM(transfer_id, CE_DESC_FLAGS_META_DATA);
......@@ -303,11 +302,11 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
if (flags & CE_SEND_FLAG_BYTE_SWAP)
desc_flags |= CE_DESC_FLAGS_BYTE_SWAP;
sdesc->addr = __cpu_to_le32(buffer);
sdesc->nbytes = __cpu_to_le16(nbytes);
sdesc->flags = __cpu_to_le16(desc_flags);
sdesc.addr = __cpu_to_le32(buffer);
sdesc.nbytes = __cpu_to_le16(nbytes);
sdesc.flags = __cpu_to_le16(desc_flags);
*desc = *sdesc;
*desc = sdesc;
src_ring->per_transfer_context[write_index] = per_transfer_context;
......@@ -579,17 +578,13 @@ int ath10k_ce_revoke_recv_next(struct ath10k_ce_pipe *ce_state,
* The caller takes responsibility for any necessary locking.
*/
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
void **per_transfer_contextp)
{
struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 ctrl_addr = ce_state->ctrl_addr;
struct ath10k *ar = ce_state->ar;
unsigned int nentries_mask = src_ring->nentries_mask;
unsigned int sw_index = src_ring->sw_index;
struct ce_desc *sdesc, *sbase;
unsigned int read_index;
if (src_ring->hw_index == sw_index) {
......@@ -614,15 +609,6 @@ int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
if (read_index == sw_index)
return -EIO;
sbase = src_ring->shadow_base;
sdesc = CE_SRC_RING_TO_DESC(sbase, sw_index);
/* Return data from completed source descriptor */
*bufferp = __le32_to_cpu(sdesc->addr);
*nbytesp = __le16_to_cpu(sdesc->nbytes);
*transfer_idp = MS(__le16_to_cpu(sdesc->flags),
CE_DESC_FLAGS_META_DATA);
if (per_transfer_contextp)
*per_transfer_contextp =
src_ring->per_transfer_context[sw_index];
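/* Note: with the source-ring shadow copy removed, the completed
* descriptor is no longer read back at this point; only the
* per-transfer context is returned, which is why the bufferp,
* nbytesp and transfer_idp out-parameters were dropped above.
*/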
......@@ -697,10 +683,7 @@ int ath10k_ce_cancel_send_next(struct ath10k_ce_pipe *ce_state,
}
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp)
void **per_transfer_contextp)
{
struct ath10k *ar = ce_state->ar;
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
......@@ -708,9 +691,7 @@ int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
spin_lock_bh(&ar_pci->ce_lock);
ret = ath10k_ce_completed_send_next_nolock(ce_state,
per_transfer_contextp,
bufferp, nbytesp,
transfer_idp);
per_transfer_contextp);
spin_unlock_bh(&ar_pci->ce_lock);
return ret;
......@@ -940,27 +921,6 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
/*
* Also allocate a shadow src ring in regular
* mem to use for faster access.
*/
src_ring->shadow_base_unaligned =
kmalloc((nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN), GFP_KERNEL);
if (!src_ring->shadow_base_unaligned) {
dma_free_coherent(ar->dev,
(nentries * sizeof(struct ce_desc) +
CE_DESC_RING_ALIGN),
src_ring->base_addr_owner_space,
src_ring->base_addr_ce_space);
kfree(src_ring);
return ERR_PTR(-ENOMEM);
}
src_ring->shadow_base = PTR_ALIGN(
src_ring->shadow_base_unaligned,
CE_DESC_RING_ALIGN);
return src_ring;
}
......@@ -1139,7 +1099,6 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
if (ce_state->src_ring) {
kfree(ce_state->src_ring->shadow_base_unaligned);
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
......
......@@ -100,12 +100,6 @@ struct ath10k_ce_ring {
/* CE address space */
u32 base_addr_ce_space;
/*
* Start of shadow copy of descriptors, within regular memory.
* Aligned to descriptor-size boundary.
*/
void *shadow_base_unaligned;
struct ce_desc *shadow_base;
/* keep last */
void *per_transfer_context[0];
......@@ -192,16 +186,10 @@ int ath10k_ce_completed_recv_next(struct ath10k_ce_pipe *ce_state,
* Pops 1 completed send buffer from Source ring.
*/
int ath10k_ce_completed_send_next(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
void **per_transfer_contextp);
int ath10k_ce_completed_send_next_nolock(struct ath10k_ce_pipe *ce_state,
void **per_transfer_contextp,
u32 *bufferp,
unsigned int *nbytesp,
unsigned int *transfer_idp);
void **per_transfer_contextp);
/*==================CE Engine Initialization=======================*/
......
......@@ -137,6 +137,21 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA99X0_BOARD_EXT_DATA_SZ,
},
},
{
.id = QCA9377_HW_1_0_DEV_VERSION,
.name = "qca9377 hw1.0",
.patch_load_addr = QCA9377_HW_1_0_PATCH_LOAD_ADDR,
.uart_pin = 7,
.otp_exe_param = 0,
.fw = {
.dir = QCA9377_HW_1_0_FW_DIR,
.fw = QCA9377_HW_1_0_FW_FILE,
.otp = QCA9377_HW_1_0_OTP_FILE,
.board = QCA9377_HW_1_0_BOARD_DATA_FILE,
.board_size = QCA9377_BOARD_DATA_SZ,
.board_ext_size = QCA9377_BOARD_EXT_DATA_SZ,
},
},
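/* Note: QCA9377 1.0 reuses the QCA6174 board data sizes (see
* targaddrs.h below) and, further down in this change, the QCA6174
* register map and chip reset path.
*/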
};
static const char *const ath10k_core_fw_feature_str[] = {
......@@ -151,6 +166,7 @@ static const char *const ath10k_core_fw_feature_str[] = {
[ATH10K_FW_FEATURE_NO_NWIFI_DECAP_4ADDR_PADDING] = "no-4addr-pad",
[ATH10K_FW_FEATURE_SUPPORTS_SKIP_CLOCK_INIT] = "skip-clock-init",
[ATH10K_FW_FEATURE_RAW_MODE_SUPPORT] = "raw-mode",
[ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA] = "adaptive-cca",
};
static unsigned int ath10k_core_get_fw_feature_str(char *buf,
......@@ -568,8 +584,8 @@ static int ath10k_download_fw(struct ath10k *ar, enum ath10k_firmware_mode mode)
}
break;
case ATH10K_FIRMWARE_MODE_UTF:
data = ar->testmode.utf->data;
data_len = ar->testmode.utf->size;
data = ar->testmode.utf_firmware_data;
data_len = ar->testmode.utf_firmware_len;
mode_name = "utf";
break;
default:
......@@ -1900,6 +1916,7 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->hw_values = &qca988x_values;
break;
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
ar->regs = &qca6174_regs;
ar->hw_values = &qca6174_values;
break;
......
......@@ -214,6 +214,7 @@ struct ath10k_fw_stats_pdev {
s32 hw_queued;
s32 hw_reaped;
s32 underrun;
u32 hw_paused;
s32 tx_abort;
s32 mpdus_requed;
u32 tx_ko;
......@@ -226,6 +227,16 @@ struct ath10k_fw_stats_pdev {
u32 pdev_resets;
u32 phy_underrun;
u32 txop_ovf;
u32 seq_posted;
u32 seq_failed_queueing;
u32 seq_completed;
u32 seq_restarted;
u32 mu_seq_posted;
u32 mpdus_sw_flush;
u32 mpdus_hw_filter;
u32 mpdus_truncated;
u32 mpdus_ack_failed;
u32 mpdus_expired;
/* PDEV RX stats */
s32 mid_ppdu_route_change;
......@@ -242,6 +253,7 @@ struct ath10k_fw_stats_pdev {
s32 phy_errs;
s32 phy_err_drop;
s32 mpdu_errs;
s32 rx_ovfl_errs;
};
struct ath10k_fw_stats {
......@@ -497,6 +509,9 @@ enum ath10k_fw_features {
*/
ATH10K_FW_FEATURE_RAW_MODE_SUPPORT = 10,
/* Firmware Supports Adaptive CCA*/
ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA = 11,
/* keep last */
ATH10K_FW_FEATURE_COUNT,
};
......@@ -730,8 +745,6 @@ struct ath10k {
int num_started_vdevs;
/* Protected by conf-mutex */
u8 supp_tx_chainmask;
u8 supp_rx_chainmask;
u8 cfg_tx_chainmask;
u8 cfg_rx_chainmask;
......@@ -814,9 +827,12 @@ struct ath10k {
struct {
/* protected by conf_mutex */
const struct firmware *utf;
char utf_version[32];
const void *utf_firmware_data;
size_t utf_firmware_len;
DECLARE_BITMAP(orig_fw_features, ATH10K_FW_FEATURE_COUNT);
enum ath10k_fw_wmi_op_version orig_wmi_op_version;
enum ath10k_fw_wmi_op_version op_version;
/* protected by data_lock */
bool utf_monitor;
} testmode;
......
......@@ -84,6 +84,15 @@ enum qca6174_chip_id_rev {
#define QCA99X0_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA99X0_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA9377 1.0 definitions */
#define QCA9377_HW_1_0_DEV_VERSION 0x05020001
#define QCA9377_HW_1_0_CHIP_ID_REV 0x1
#define QCA9377_HW_1_0_FW_DIR ATH10K_FW_DIR "/QCA9377/hw1.0"
#define QCA9377_HW_1_0_FW_FILE "firmware.bin"
#define QCA9377_HW_1_0_OTP_FILE "otp.bin"
#define QCA9377_HW_1_0_BOARD_DATA_FILE "board.bin"
#define QCA9377_HW_1_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin"
......@@ -94,6 +103,7 @@ enum qca6174_chip_id_rev {
#define ATH10K_FW_API5_FILE "firmware-5.bin"
#define ATH10K_FW_UTF_FILE "utf.bin"
#define ATH10K_FW_UTF_API2_FILE "utf-2.bin"
/* includes also the null byte */
#define ATH10K_FIRMWARE_MAGIC "QCA-ATH10K"
......@@ -176,6 +186,7 @@ enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
ATH10K_HW_QCA99X0,
ATH10K_HW_QCA9377,
};
struct ath10k_hw_regs {
......@@ -228,6 +239,7 @@ void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
#define QCA_REV_99X0(ar) ((ar)->hw_rev == ATH10K_HW_QCA99X0)
#define QCA_REV_9377(ar) ((ar)->hw_rev == ATH10K_HW_QCA9377)
/* Known peculiarities:
* - raw appears in nwifi decap, raw and nwifi appear in ethernet decap
......
......@@ -3736,13 +3736,8 @@ static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
mutex_lock(&ar->conf_mutex);
if (ar->cfg_tx_chainmask) {
*tx_ant = ar->cfg_tx_chainmask;
*rx_ant = ar->cfg_rx_chainmask;
} else {
*tx_ant = ar->supp_tx_chainmask;
*rx_ant = ar->supp_rx_chainmask;
}
*tx_ant = ar->cfg_tx_chainmask;
*rx_ant = ar->cfg_rx_chainmask;
mutex_unlock(&ar->conf_mutex);
......@@ -3762,6 +3757,169 @@ static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
dbg, cm);
}
static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
{
int nsts = ar->vht_cap_info;
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
/* If the firmware does not report the number of supported
* space-time streams to the host, assume it supports up to 4 BF STS
* and return the value encoded in the VHT cap field (nsts - 1), i.e. 3.
*/
if (nsts == 0)
return 3;
return nsts;
}
static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
{
int sound_dim = ar->vht_cap_info;
sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
/* If the sounding dimension is not advertised by the firmware,
* let's use a default value of 1
*/
if (sound_dim == 0)
return 1;
return sound_dim;
}
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 mcs_map;
u32 val;
int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->vht_cap_info;
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
val = ath10k_mac_get_vht_cap_bf_sts(ar);
val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
vht_cap.cap |= val;
}
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
vht_cap.cap |= val;
}
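/* Two bits per spatial stream in the MCS map: chains that are both
* within num_rf_chains and set in cfg_tx_chainmask advertise MCS 0-9,
* the rest are marked not supported.
*/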
mcs_map = 0;
for (i = 0; i < 8; i++) {
if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
else
mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
}
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
return vht_cap;
}
static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
{
int i;
struct ieee80211_sta_ht_cap ht_cap = {0};
if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
return ht_cap;
ht_cap.ht_supported = 1;
ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
u32 smps;
smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
ht_cap.cap |= smps;
}
if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
u32 stbc;
stbc = ar->ht_cap_info;
stbc &= WMI_HT_CAP_RX_STBC;
stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
stbc &= IEEE80211_HT_CAP_RX_STBC;
ht_cap.cap |= stbc;
}
if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
/* max AMSDU is implicitly taken from vht_cap_info */
if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
for (i = 0; i < ar->num_rf_chains; i++) {
if (ar->cfg_rx_chainmask & BIT(i))
ht_cap.mcs.rx_mask[i] = 0xFF;
}
ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
return ht_cap;
}
static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
{
struct ieee80211_supported_band *band;
struct ieee80211_sta_vht_cap vht_cap;
struct ieee80211_sta_ht_cap ht_cap;
ht_cap = ath10k_get_ht_cap(ar);
vht_cap = ath10k_create_vht_cap(ar);
if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
band = &ar->mac.sbands[IEEE80211_BAND_2GHZ];
band->ht_cap = ht_cap;
/* Enable the VHT support at 2.4 GHz */
band->vht_cap = vht_cap;
}
if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
band = &ar->mac.sbands[IEEE80211_BAND_5GHZ];
band->ht_cap = ht_cap;
band->vht_cap = vht_cap;
}
}
static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
{
int ret;
......@@ -3794,6 +3952,9 @@ static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
return ret;
}
/* Reload HT/VHT capability */
ath10k_mac_setup_ht_vht_cap(ar);
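/* ath10k_mac_setup_ht_vht_cap() rebuilds the HT/VHT MCS masks from
* cfg_tx_chainmask/cfg_rx_chainmask, so the advertised rates follow
* the newly configured antennas.
*/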
return 0;
}
......@@ -3884,9 +4045,7 @@ static int ath10k_start(struct ieee80211_hw *hw)
}
}
if (ar->cfg_tx_chainmask)
__ath10k_set_antenna(ar, ar->cfg_tx_chainmask,
ar->cfg_rx_chainmask);
__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
/*
* By default FW set ARP frames ac to voice (6). In that case ARP
......@@ -3905,6 +4064,18 @@ static int ath10k_start(struct ieee80211_hw *hw)
goto err_core_stop;
}
if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
ar->fw_features)) {
ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
WMI_CCA_DETECT_LEVEL_AUTO,
WMI_CCA_DETECT_MARGIN_AUTO);
if (ret) {
ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
ret);
goto err_core_stop;
}
}
ret = ath10k_wmi_pdev_set_param(ar,
ar->wmi.pdev_param->ani_enable, 1);
if (ret) {
......@@ -4063,39 +4234,6 @@ static u32 get_nss_from_chainmask(u16 chain_mask)
return 1;
}
static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
{
int nsts = ar->vht_cap_info;
nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
/* If firmware does not deliver to host number of space-time
* streams supported, assume it support up to 4 BF STS and return
* the value for VHT CAP: nsts-1)
* */
if (nsts == 0)
return 3;
return nsts;
}
static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
{
int sound_dim = ar->vht_cap_info;
sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
/* If the sounding dimension is not advertised by the firmware,
* let's use a default value of 1
*/
if (sound_dim == 0)
return 1;
return sound_dim;
}
static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
{
u32 value = 0;
......@@ -6949,111 +7087,6 @@ static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
},
};
static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
{
struct ieee80211_sta_vht_cap vht_cap = {0};
u16 mcs_map;
u32 val;
int i;
vht_cap.vht_supported = 1;
vht_cap.cap = ar->vht_cap_info;
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
val = ath10k_mac_get_vht_cap_bf_sts(ar);
val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
vht_cap.cap |= val;
}
if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
vht_cap.cap |= val;
}
mcs_map = 0;
for (i = 0; i < 8; i++) {
if (i < ar->num_rf_chains)
mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2);
else
mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2);
}
vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
return vht_cap;
}
static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
{
int i;
struct ieee80211_sta_ht_cap ht_cap = {0};
if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
return ht_cap;
ht_cap.ht_supported = 1;
ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT;
if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
u32 smps;
smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
ht_cap.cap |= smps;
}
if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC)
ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
u32 stbc;
stbc = ar->ht_cap_info;
stbc &= WMI_HT_CAP_RX_STBC;
stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
stbc &= IEEE80211_HT_CAP_RX_STBC;
ht_cap.cap |= stbc;
}
if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
/* max AMSDU is implicitly taken from vht_cap_info */
if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
for (i = 0; i < ar->num_rf_chains; i++)
ht_cap.mcs.rx_mask[i] = 0xFF;
ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
return ht_cap;
}
static void ath10k_get_arvif_iter(void *data, u8 *mac,
struct ieee80211_vif *vif)
{
......@@ -7095,8 +7128,6 @@ int ath10k_mac_register(struct ath10k *ar)
WLAN_CIPHER_SUITE_AES_CMAC,
};
struct ieee80211_supported_band *band;
struct ieee80211_sta_vht_cap vht_cap;
struct ieee80211_sta_ht_cap ht_cap;
void *channels;
int ret;
......@@ -7104,9 +7135,6 @@ int ath10k_mac_register(struct ath10k *ar)
SET_IEEE80211_DEV(ar->hw, ar->dev);
ht_cap = ath10k_get_ht_cap(ar);
vht_cap = ath10k_create_vht_cap(ar);
BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
ARRAY_SIZE(ath10k_5ghz_channels)) !=
ATH10K_NUM_CHANS);
......@@ -7125,10 +7153,6 @@ int ath10k_mac_register(struct ath10k *ar)
band->channels = channels;
band->n_bitrates = ath10k_g_rates_size;
band->bitrates = ath10k_g_rates;
band->ht_cap = ht_cap;
/* Enable the VHT support at 2.4 GHz */
band->vht_cap = vht_cap;
ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band;
}
......@@ -7147,18 +7171,18 @@ int ath10k_mac_register(struct ath10k *ar)
band->channels = channels;
band->n_bitrates = ath10k_a_rates_size;
band->bitrates = ath10k_a_rates;
band->ht_cap = ht_cap;
band->vht_cap = vht_cap;
ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band;
}
ath10k_mac_setup_ht_vht_cap(ar);
ar->hw->wiphy->interface_modes =
BIT(NL80211_IFTYPE_STATION) |
BIT(NL80211_IFTYPE_AP) |
BIT(NL80211_IFTYPE_MESH_POINT);
ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask;
ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features))
ar->hw->wiphy->interface_modes |=
......
......@@ -61,12 +61,14 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define QCA6164_2_1_DEVICE_ID (0x0041)
#define QCA6174_2_1_DEVICE_ID (0x003e)
#define QCA99X0_2_0_DEVICE_ID (0x0040)
#define QCA9377_1_0_DEVICE_ID (0x0042)
static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6164_2_1_DEVICE_ID) }, /* PCI-E QCA6164 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
{ PCI_VDEVICE(ATHEROS, QCA99X0_2_0_DEVICE_ID) }, /* PCI-E QCA99X0 V2 */
{ PCI_VDEVICE(ATHEROS, QCA9377_1_0_DEVICE_ID) }, /* PCI-E QCA9377 V1 */
{0}
};
......@@ -90,6 +92,7 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
{ QCA99X0_2_0_DEVICE_ID, QCA99X0_HW_2_0_CHIP_ID_REV },
{ QCA9377_1_0_DEVICE_ID, QCA9377_HW_1_0_CHIP_ID_REV },
};
static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
......@@ -827,6 +830,7 @@ static u32 ath10k_pci_targ_cpu_to_ce_addr(struct ath10k *ar, u32 addr)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = (ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS) &
0x7ff) << 21;
......@@ -910,9 +914,8 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
goto done;
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
&completed_nbytes,
&id) != 0) {
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
ret = -EBUSY;
......@@ -920,16 +923,6 @@ static int ath10k_pci_diag_read_mem(struct ath10k *ar, u32 address, void *data,
}
}
if (nbytes != completed_nbytes) {
ret = -EIO;
goto done;
}
if (buf != (u32)address) {
ret = -EIO;
goto done;
}
i = 0;
while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
&completed_nbytes,
......@@ -1083,9 +1076,8 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
goto done;
i = 0;
while (ath10k_ce_completed_send_next_nolock(ce_diag, NULL, &buf,
&completed_nbytes,
&id) != 0) {
while (ath10k_ce_completed_send_next_nolock(ce_diag,
NULL) != 0) {
mdelay(1);
if (i++ > DIAG_ACCESS_CE_TIMEOUT_MS) {
......@@ -1094,16 +1086,6 @@ static int ath10k_pci_diag_write_mem(struct ath10k *ar, u32 address,
}
}
if (nbytes != completed_nbytes) {
ret = -EIO;
goto done;
}
if (buf != ce_data) {
ret = -EIO;
goto done;
}
i = 0;
while (ath10k_ce_completed_recv_next_nolock(ce_diag, NULL, &buf,
&completed_nbytes,
......@@ -1159,13 +1141,9 @@ static void ath10k_pci_htc_tx_cb(struct ath10k_ce_pipe *ce_state)
struct ath10k *ar = ce_state->ar;
struct sk_buff_head list;
struct sk_buff *skb;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
__skb_queue_head_init(&list);
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
&nbytes, &transfer_id) == 0) {
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
/* no need to call tx completion for NULL pointers */
if (skb == NULL)
continue;
......@@ -1235,12 +1213,8 @@ static void ath10k_pci_htt_tx_cb(struct ath10k_ce_pipe *ce_state)
{
struct ath10k *ar = ce_state->ar;
struct sk_buff *skb;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb, &ce_data,
&nbytes, &transfer_id) == 0) {
while (ath10k_ce_completed_send_next(ce_state, (void **)&skb) == 0) {
/* no need to call tx completion for NULL pointers */
if (!skb)
continue;
......@@ -1513,6 +1487,7 @@ static void ath10k_pci_irq_msi_fw_mask(struct ath10k *ar)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS);
val &= ~CORE_CTRL_PCIE_REG_31_MASK;
......@@ -1534,6 +1509,7 @@ static void ath10k_pci_irq_msi_fw_unmask(struct ath10k *ar)
switch (ar->hw_rev) {
case ATH10K_HW_QCA988X:
case ATH10K_HW_QCA6174:
case ATH10K_HW_QCA9377:
val = ath10k_pci_read32(ar, SOC_CORE_BASE_ADDRESS +
CORE_CTRL_ADDRESS);
val |= CORE_CTRL_PCIE_REG_31_MASK;
......@@ -1624,7 +1600,6 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
struct ath10k_pci *ar_pci;
struct ath10k_ce_pipe *ce_pipe;
struct ath10k_ce_ring *ce_ring;
struct ce_desc *ce_desc;
struct sk_buff *skb;
int i;
......@@ -1639,10 +1614,6 @@ static void ath10k_pci_tx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
if (!pci_pipe->buf_sz)
return;
ce_desc = ce_ring->shadow_base;
if (WARN_ON(!ce_desc))
return;
for (i = 0; i < ce_ring->nentries; i++) {
skb = ce_ring->per_transfer_context[i];
if (!skb)
......@@ -1816,12 +1787,8 @@ static int ath10k_pci_hif_exchange_bmi_msg(struct ath10k *ar,
static void ath10k_pci_bmi_send_done(struct ath10k_ce_pipe *ce_state)
{
struct bmi_xfer *xfer;
u32 ce_data;
unsigned int nbytes;
unsigned int transfer_id;
if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer, &ce_data,
&nbytes, &transfer_id))
if (ath10k_ce_completed_send_next(ce_state, (void **)&xfer))
return;
xfer->tx_done = true;
......@@ -1911,6 +1878,8 @@ static int ath10k_pci_get_num_banks(struct ath10k *ar)
return 9;
}
break;
case QCA9377_1_0_DEVICE_ID:
return 2;
}
ath10k_warn(ar, "unknown number of banks, assuming 1\n");
......@@ -2371,6 +2340,8 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
return ath10k_pci_qca988x_chip_reset(ar);
else if (QCA_REV_6174(ar))
return ath10k_pci_qca6174_chip_reset(ar);
else if (QCA_REV_9377(ar))
return ath10k_pci_qca6174_chip_reset(ar);
else if (QCA_REV_99X0(ar))
return ath10k_pci_qca99x0_chip_reset(ar);
else
......@@ -3003,6 +2974,10 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
hw_rev = ATH10K_HW_QCA99X0;
pci_ps = false;
break;
case QCA9377_1_0_DEVICE_ID:
hw_rev = ATH10K_HW_QCA9377;
pci_ps = true;
break;
default:
WARN_ON(1);
return -ENOTSUPP;
......@@ -3204,3 +3179,7 @@ MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API4_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" QCA6174_HW_3_0_BOARD_DATA_FILE);
MODULE_FIRMWARE(QCA6174_HW_3_0_FW_DIR "/" ATH10K_BOARD_API2_FILE);
/* QCA9377 1.0 firmware files */
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" ATH10K_FW_API5_FILE);
MODULE_FIRMWARE(QCA9377_HW_1_0_FW_DIR "/" QCA9377_HW_1_0_BOARD_DATA_FILE);
......@@ -450,6 +450,9 @@ Fw Mode/SubMode Mask
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
#define QCA9377_BOARD_DATA_SZ QCA6174_BOARD_DATA_SZ
#define QCA9377_BOARD_EXT_DATA_SZ 0
#define QCA99X0_BOARD_DATA_SZ 12288
#define QCA99X0_BOARD_EXT_DATA_SZ 0
......
......@@ -139,11 +139,181 @@ static int ath10k_tm_cmd_get_version(struct ath10k *ar, struct nlattr *tb[])
return cfg80211_testmode_reply(skb);
}
static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
static int ath10k_tm_fetch_utf_firmware_api_2(struct ath10k *ar)
{
size_t len, magic_len, ie_len;
struct ath10k_fw_ie *hdr;
char filename[100];
__le32 *version;
const u8 *data;
int ie_id, ret;
snprintf(filename, sizeof(filename), "%s/%s",
ar->hw_params.fw.dir, ATH10K_FW_UTF_API2_FILE);
/* load utf firmware image */
ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
return ret;
}
data = ar->testmode.utf->data;
len = ar->testmode.utf->size;
/* FIXME: call release_firmware() in error cases */
/* magic also includes the null byte, check that as well */
magic_len = strlen(ATH10K_FIRMWARE_MAGIC) + 1;
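/* With ATH10K_FIRMWARE_MAGIC defined as "QCA-ATH10K", magic_len is
* 11 here; the ALIGN() below rounds the amount skipped up to 12.
*/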
if (len < magic_len) {
ath10k_err(ar, "utf firmware file is too small to contain magic\n");
ret = -EINVAL;
goto err;
}
if (memcmp(data, ATH10K_FIRMWARE_MAGIC, magic_len) != 0) {
ath10k_err(ar, "invalid firmware magic\n");
ret = -EINVAL;
goto err;
}
/* jump over the padding */
magic_len = ALIGN(magic_len, 4);
len -= magic_len;
data += magic_len;
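/* What follows is a stream of TLV-style elements: each starts with a
* struct ath10k_fw_ie header (little-endian 32-bit id and len)
* followed by ie_len bytes of payload, padded to a 4-byte boundary.
*/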
/* loop elements */
while (len > sizeof(struct ath10k_fw_ie)) {
hdr = (struct ath10k_fw_ie *)data;
ie_id = le32_to_cpu(hdr->id);
ie_len = le32_to_cpu(hdr->len);
len -= sizeof(*hdr);
data += sizeof(*hdr);
if (len < ie_len) {
ath10k_err(ar, "invalid length for FW IE %d (%zu < %zu)\n",
ie_id, len, ie_len);
ret = -EINVAL;
goto err;
}
switch (ie_id) {
case ATH10K_FW_IE_FW_VERSION:
if (ie_len > sizeof(ar->testmode.utf_version) - 1)
break;
memcpy(ar->testmode.utf_version, data, ie_len);
ar->testmode.utf_version[ie_len] = '\0';
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode found fw utf version %s\n",
ar->testmode.utf_version);
break;
case ATH10K_FW_IE_TIMESTAMP:
/* ignore timestamp, but don't warn about it either */
break;
case ATH10K_FW_IE_FW_IMAGE:
ath10k_dbg(ar, ATH10K_DBG_TESTMODE,
"testmode found fw image ie (%zd B)\n",
ie_len);
ar->testmode.utf_firmware_data = data;
ar->testmode.utf_firmware_len = ie_len;
break;
case ATH10K_FW_IE_WMI_OP_VERSION:
if (ie_len != sizeof(u32))
break;
version = (__le32 *)data;
ar->testmode.op_version = le32_to_cpup(version);
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode found fw ie wmi op version %d\n",
ar->testmode.op_version);
break;
default:
ath10k_warn(ar, "Unknown testmode FW IE: %u\n",
le32_to_cpu(hdr->id));
break;
}
/* jump over the padding */
ie_len = ALIGN(ie_len, 4);
len -= ie_len;
data += ie_len;
}
if (!ar->testmode.utf_firmware_data || !ar->testmode.utf_firmware_len) {
ath10k_err(ar, "No ATH10K_FW_IE_FW_IMAGE found\n");
ret = -EINVAL;
goto err;
}
return 0;
err:
release_firmware(ar->testmode.utf);
return ret;
}
static int ath10k_tm_fetch_utf_firmware_api_1(struct ath10k *ar)
{
char filename[100];
int ret;
snprintf(filename, sizeof(filename), "%s/%s",
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
/* load utf firmware image */
ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
return ret;
}
/* We didn't find an API 2 image, so fall back to API 1. The
* "utf.bin" image does not advertise firmware features. Do an
* ugly hack where we force the firmware features to match the
* 10.1 branch so that wmi.c will use the correct WMI interface.
*/
ar->testmode.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
ar->testmode.utf_firmware_data = ar->testmode.utf->data;
ar->testmode.utf_firmware_len = ar->testmode.utf->size;
return 0;
}
static int ath10k_tm_fetch_firmware(struct ath10k *ar)
{
int ret;
ret = ath10k_tm_fetch_utf_firmware_api_2(ar);
if (ret == 0) {
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using fw utf api 2");
return 0;
}
ret = ath10k_tm_fetch_utf_firmware_api_1(ar);
if (ret) {
ath10k_err(ar, "failed to fetch utf firmware binary: %d", ret);
return ret;
}
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode using utf api 1");
return 0;
}
static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
{
const char *ver;
int ret;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode cmd utf start\n");
mutex_lock(&ar->conf_mutex);
......@@ -165,36 +335,27 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
goto err;
}
snprintf(filename, sizeof(filename), "%s/%s",
ar->hw_params.fw.dir, ATH10K_FW_UTF_FILE);
/* load utf firmware image */
ret = request_firmware(&ar->testmode.utf, filename, ar->dev);
ret = ath10k_tm_fetch_firmware(ar);
if (ret) {
ath10k_warn(ar, "failed to retrieve utf firmware '%s': %d\n",
filename, ret);
ath10k_err(ar, "failed to fetch UTF firmware: %d", ret);
goto err;
}
spin_lock_bh(&ar->data_lock);
ar->testmode.utf_monitor = true;
spin_unlock_bh(&ar->data_lock);
BUILD_BUG_ON(sizeof(ar->fw_features) !=
sizeof(ar->testmode.orig_fw_features));
memcpy(ar->testmode.orig_fw_features, ar->fw_features,
sizeof(ar->fw_features));
ar->testmode.orig_wmi_op_version = ar->wmi.op_version;
/* utf.bin firmware image does not advertise firmware features. Do
* an ugly hack where we force the firmware features so that wmi.c
* will use the correct WMI interface.
*/
memset(ar->fw_features, 0, sizeof(ar->fw_features));
ar->wmi.op_version = ATH10K_FW_WMI_OP_VERSION_10_1;
ar->wmi.op_version = ar->testmode.op_version;
ath10k_dbg(ar, ATH10K_DBG_TESTMODE, "testmode wmi version %d\n",
ar->wmi.op_version);
ret = ath10k_hif_power_up(ar);
if (ret) {
......@@ -212,7 +373,12 @@ static int ath10k_tm_cmd_utf_start(struct ath10k *ar, struct nlattr *tb[])
ar->state = ATH10K_STATE_UTF;
ath10k_info(ar, "UTF firmware started\n");
if (strlen(ar->testmode.utf_version) > 0)
ver = ar->testmode.utf_version;
else
ver = "API 1";
ath10k_info(ar, "UTF firmware %s started\n", ver);
mutex_unlock(&ar->conf_mutex);
......
......@@ -182,6 +182,10 @@ struct wmi_ops {
void (*fw_stats_fill)(struct ath10k *ar,
struct ath10k_fw_stats *fw_stats,
char *buf);
struct sk_buff *(*gen_pdev_enable_adaptive_cca)(struct ath10k *ar,
u8 enable,
u32 detect_level,
u32 detect_margin);
};
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
......@@ -1302,4 +1306,25 @@ ath10k_wmi_fw_stats_fill(struct ath10k *ar, struct ath10k_fw_stats *fw_stats,
ar->wmi.ops->fw_stats_fill(ar, fw_stats, buf);
return 0;
}
static inline int
ath10k_wmi_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
u32 detect_level, u32 detect_margin)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_pdev_enable_adaptive_cca)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_pdev_enable_adaptive_cca(ar, enable,
detect_level,
detect_margin);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->pdev_enable_adaptive_cca_cmdid);
}
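/* Note: ath10k_start() calls this with enable=1 and the *_AUTO macros
* (both defined as 0) when the firmware advertises
* ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA; WMI flavours without the
* gen_pdev_enable_adaptive_cca op get -EOPNOTSUPP here.
*/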
#endif
......@@ -23,6 +23,7 @@
#include "wmi-ops.h"
#include "wmi-tlv.h"
#include "p2p.h"
#include "testmode.h"
/***************/
/* TLV helpers */
......@@ -419,6 +420,7 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
{
struct wmi_cmd_hdr *cmd_hdr;
enum wmi_tlv_event_id id;
bool consumed;
cmd_hdr = (struct wmi_cmd_hdr *)skb->data;
id = MS(__le32_to_cpu(cmd_hdr->cmd_id), WMI_CMD_HDR_CMD_ID);
......@@ -428,6 +430,18 @@ static void ath10k_wmi_tlv_op_rx(struct ath10k *ar, struct sk_buff *skb)
trace_ath10k_wmi_event(ar, id, skb->data, skb->len);
consumed = ath10k_tm_event_wmi(ar, id, skb);
/* The ready event must be handled normally also in UTF mode so that
* we know the UTF firmware has booted; all other WMI events are
* simply passed through to testmode.
*/
if (consumed && id != WMI_TLV_READY_EVENTID) {
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi tlv testmode consumed 0x%x\n", id);
goto out;
}
switch (id) {
case WMI_TLV_MGMT_RX_EVENTID:
ath10k_wmi_event_mgmt_rx(ar, skb);
......
......@@ -148,6 +148,7 @@ static struct wmi_cmd_map wmi_cmd_map = {
.gpio_config_cmdid = WMI_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
......@@ -313,6 +314,7 @@ static struct wmi_cmd_map wmi_10x_cmd_map = {
.gpio_config_cmdid = WMI_10X_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10X_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
......@@ -477,6 +479,7 @@ static struct wmi_cmd_map wmi_10_2_4_cmd_map = {
.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
.pdev_enable_adaptive_cca_cmdid = WMI_10_2_SET_CCA_PARAMS,
.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
......@@ -1407,6 +1410,7 @@ static struct wmi_cmd_map wmi_10_2_cmd_map = {
.gpio_config_cmdid = WMI_10_2_GPIO_CONFIG_CMDID,
.gpio_output_cmdid = WMI_10_2_GPIO_OUTPUT_CMDID,
.pdev_get_temperature_cmdid = WMI_CMD_UNSUPPORTED,
.pdev_enable_adaptive_cca_cmdid = WMI_CMD_UNSUPPORTED,
.scan_update_request_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_standby_response_cmdid = WMI_CMD_UNSUPPORTED,
.vdev_resume_response_cmdid = WMI_CMD_UNSUPPORTED,
......@@ -2475,6 +2479,47 @@ void ath10k_wmi_pull_pdev_stats_tx(const struct wmi_pdev_stats_tx *src,
dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
}
static void
ath10k_wmi_10_4_pull_pdev_stats_tx(const struct wmi_10_4_pdev_stats_tx *src,
struct ath10k_fw_stats_pdev *dst)
{
dst->comp_queued = __le32_to_cpu(src->comp_queued);
dst->comp_delivered = __le32_to_cpu(src->comp_delivered);
dst->msdu_enqued = __le32_to_cpu(src->msdu_enqued);
dst->mpdu_enqued = __le32_to_cpu(src->mpdu_enqued);
dst->wmm_drop = __le32_to_cpu(src->wmm_drop);
dst->local_enqued = __le32_to_cpu(src->local_enqued);
dst->local_freed = __le32_to_cpu(src->local_freed);
dst->hw_queued = __le32_to_cpu(src->hw_queued);
dst->hw_reaped = __le32_to_cpu(src->hw_reaped);
dst->underrun = __le32_to_cpu(src->underrun);
dst->tx_abort = __le32_to_cpu(src->tx_abort);
dst->mpdus_requed = __le32_to_cpu(src->mpdus_requed);
dst->tx_ko = __le32_to_cpu(src->tx_ko);
dst->data_rc = __le32_to_cpu(src->data_rc);
dst->self_triggers = __le32_to_cpu(src->self_triggers);
dst->sw_retry_failure = __le32_to_cpu(src->sw_retry_failure);
dst->illgl_rate_phy_err = __le32_to_cpu(src->illgl_rate_phy_err);
dst->pdev_cont_xretry = __le32_to_cpu(src->pdev_cont_xretry);
dst->pdev_tx_timeout = __le32_to_cpu(src->pdev_tx_timeout);
dst->pdev_resets = __le32_to_cpu(src->pdev_resets);
dst->phy_underrun = __le32_to_cpu(src->phy_underrun);
dst->txop_ovf = __le32_to_cpu(src->txop_ovf);
dst->hw_paused = __le32_to_cpu(src->hw_paused);
dst->seq_posted = __le32_to_cpu(src->seq_posted);
dst->seq_failed_queueing =
__le32_to_cpu(src->seq_failed_queueing);
dst->seq_completed = __le32_to_cpu(src->seq_completed);
dst->seq_restarted = __le32_to_cpu(src->seq_restarted);
dst->mu_seq_posted = __le32_to_cpu(src->mu_seq_posted);
dst->mpdus_sw_flush = __le32_to_cpu(src->mpdus_sw_flush);
dst->mpdus_hw_filter = __le32_to_cpu(src->mpdus_hw_filter);
dst->mpdus_truncated = __le32_to_cpu(src->mpdus_truncated);
dst->mpdus_ack_failed = __le32_to_cpu(src->mpdus_ack_failed);
dst->mpdus_expired = __le32_to_cpu(src->mpdus_expired);
}
void ath10k_wmi_pull_pdev_stats_rx(const struct wmi_pdev_stats_rx *src,
struct ath10k_fw_stats_pdev *dst)
{
......@@ -2785,6 +2830,86 @@ static int ath10k_wmi_10_2_4_op_pull_fw_stats(struct ath10k *ar,
return 0;
}
static int ath10k_wmi_10_4_op_pull_fw_stats(struct ath10k *ar,
struct sk_buff *skb,
struct ath10k_fw_stats *stats)
{
const struct wmi_10_2_stats_event *ev = (void *)skb->data;
u32 num_pdev_stats;
u32 num_pdev_ext_stats;
u32 num_vdev_stats;
u32 num_peer_stats;
int i;
if (!skb_pull(skb, sizeof(*ev)))
return -EPROTO;
num_pdev_stats = __le32_to_cpu(ev->num_pdev_stats);
num_pdev_ext_stats = __le32_to_cpu(ev->num_pdev_ext_stats);
num_vdev_stats = __le32_to_cpu(ev->num_vdev_stats);
num_peer_stats = __le32_to_cpu(ev->num_peer_stats);
for (i = 0; i < num_pdev_stats; i++) {
const struct wmi_10_4_pdev_stats *src;
struct ath10k_fw_stats_pdev *dst;
src = (void *)skb->data;
if (!skb_pull(skb, sizeof(*src)))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ath10k_wmi_pull_pdev_stats_base(&src->base, dst);
ath10k_wmi_10_4_pull_pdev_stats_tx(&src->tx, dst);
ath10k_wmi_pull_pdev_stats_rx(&src->rx, dst);
dst->rx_ovfl_errs = __le32_to_cpu(src->rx_ovfl_errs);
ath10k_wmi_pull_pdev_stats_extra(&src->extra, dst);
list_add_tail(&dst->list, &stats->pdevs);
}
for (i = 0; i < num_pdev_ext_stats; i++) {
const struct wmi_10_2_pdev_ext_stats *src;
src = (void *)skb->data;
if (!skb_pull(skb, sizeof(*src)))
return -EPROTO;
/* FIXME: expose values to userspace
*
* Note: Even though this loop seems to do nothing it is
* required to parse following sub-structures properly.
*/
}
/* fw doesn't implement vdev stats */
for (i = 0; i < num_peer_stats; i++) {
const struct wmi_10_4_peer_stats *src;
struct ath10k_fw_stats_peer *dst;
src = (void *)skb->data;
if (!skb_pull(skb, sizeof(*src)))
return -EPROTO;
dst = kzalloc(sizeof(*dst), GFP_ATOMIC);
if (!dst)
continue;
ether_addr_copy(dst->peer_macaddr, src->peer_macaddr.addr);
dst->peer_rssi = __le32_to_cpu(src->peer_rssi);
dst->peer_tx_rate = __le32_to_cpu(src->peer_tx_rate);
dst->peer_rx_rate = __le32_to_cpu(src->peer_rx_rate);
/* FIXME: expose 10.4 specific values */
list_add_tail(&dst->list, &stats->peers);
}
return 0;
}
void ath10k_wmi_event_update_stats(struct ath10k *ar, struct sk_buff *skb)
{
ath10k_dbg(ar, ATH10K_DBG_WMI, "WMI_UPDATE_STATS_EVENTID\n");
......@@ -4335,8 +4460,10 @@ static void ath10k_wmi_event_service_ready_work(struct work_struct *work)
ar->num_rf_chains = ar->max_spatial_stream;
}
ar->supp_tx_chainmask = (1 << ar->num_rf_chains) - 1;
ar->supp_rx_chainmask = (1 << ar->num_rf_chains) - 1;
if (!ar->cfg_tx_chainmask) {
ar->cfg_tx_chainmask = (1 << ar->num_rf_chains) - 1;
ar->cfg_rx_chainmask = (1 << ar->num_rf_chains) - 1;
}
if (strlen(ar->hw->wiphy->fw_version) == 0) {
snprintf(ar->hw->wiphy->fw_version,
......@@ -4931,6 +5058,9 @@ static void ath10k_wmi_10_4_op_rx(struct ath10k *ar, struct sk_buff *skb)
ath10k_dbg(ar, ATH10K_DBG_WMI,
"received event id %d not implemented\n", id);
break;
case WMI_10_4_UPDATE_STATS_EVENTID:
ath10k_wmi_event_update_stats(ar, skb);
break;
default:
ath10k_warn(ar, "Unknown eventid: %d\n", id);
break;
......@@ -6996,6 +7126,112 @@ void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
buf[len] = 0;
}
static struct sk_buff *
ath10k_wmi_op_gen_pdev_enable_adaptive_cca(struct ath10k *ar, u8 enable,
u32 detect_level, u32 detect_margin)
{
struct wmi_pdev_set_adaptive_cca_params *cmd;
struct sk_buff *skb;
skb = ath10k_wmi_alloc_skb(ar, sizeof(*cmd));
if (!skb)
return ERR_PTR(-ENOMEM);
cmd = (struct wmi_pdev_set_adaptive_cca_params *)skb->data;
cmd->enable = __cpu_to_le32(enable);
cmd->cca_detect_level = __cpu_to_le32(detect_level);
cmd->cca_detect_margin = __cpu_to_le32(detect_margin);
ath10k_dbg(ar, ATH10K_DBG_WMI,
"wmi pdev set adaptive cca params enable:%d detection level:%d detection margin:%d\n",
enable, detect_level, detect_margin);
return skb;
}
void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
struct ath10k_fw_stats *fw_stats,
char *buf)
{
u32 len = 0;
u32 buf_len = ATH10K_FW_STATS_BUF_SIZE;
const struct ath10k_fw_stats_pdev *pdev;
const struct ath10k_fw_stats_vdev *vdev;
const struct ath10k_fw_stats_peer *peer;
size_t num_peers;
size_t num_vdevs;
spin_lock_bh(&ar->data_lock);
pdev = list_first_entry_or_null(&fw_stats->pdevs,
struct ath10k_fw_stats_pdev, list);
if (!pdev) {
ath10k_warn(ar, "failed to get pdev stats\n");
goto unlock;
}
num_peers = ath10k_wmi_fw_stats_num_peers(&fw_stats->peers);
num_vdevs = ath10k_wmi_fw_stats_num_vdevs(&fw_stats->vdevs);
ath10k_wmi_fw_pdev_base_stats_fill(pdev, buf, &len);
ath10k_wmi_fw_pdev_extra_stats_fill(pdev, buf, &len);
ath10k_wmi_fw_pdev_tx_stats_fill(pdev, buf, &len);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"HW paused", pdev->hw_paused);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Seqs posted", pdev->seq_posted);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Seqs failed queueing", pdev->seq_failed_queueing);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Seqs completed", pdev->seq_completed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Seqs restarted", pdev->seq_restarted);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MU Seqs posted", pdev->mu_seq_posted);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs SW flushed", pdev->mpdus_sw_flush);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs HW filtered", pdev->mpdus_hw_filter);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs truncated", pdev->mpdus_truncated);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs receive no ACK", pdev->mpdus_ack_failed);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"MPDUs expired", pdev->mpdus_expired);
ath10k_wmi_fw_pdev_rx_stats_fill(pdev, buf, &len);
len += scnprintf(buf + len, buf_len - len, "%30s %10d\n",
"Num Rx Overflow errors", pdev->rx_ovfl_errs);
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
"ath10k VDEV stats", num_vdevs);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
list_for_each_entry(vdev, &fw_stats->vdevs, list) {
ath10k_wmi_fw_vdev_stats_fill(vdev, buf, &len);
}
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%30s (%zu)\n",
"ath10k PEER stats", num_peers);
len += scnprintf(buf + len, buf_len - len, "%30s\n\n",
"=================");
list_for_each_entry(peer, &fw_stats->peers, list) {
ath10k_wmi_fw_peer_stats_fill(peer, buf, &len);
}
unlock:
spin_unlock_bh(&ar->data_lock);
if (len >= buf_len)
buf[len - 1] = 0;
else
buf[len] = 0;
}
static const struct wmi_ops wmi_ops = {
.rx = ath10k_wmi_op_rx,
.map_svc = wmi_main_svc_map,
......@@ -7059,6 +7295,7 @@ static const struct wmi_ops wmi_ops = {
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
/* .gen_adaptive_qcs not implemented */
/* .gen_pdev_enable_adaptive_cca not implemented */
};
static const struct wmi_ops wmi_10_1_ops = {
......@@ -7125,6 +7362,7 @@ static const struct wmi_ops wmi_10_1_ops = {
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
/* .gen_adaptive_qcs not implemented */
/* .gen_pdev_enable_adaptive_cca not implemented */
};
static const struct wmi_ops wmi_10_2_ops = {
......@@ -7188,6 +7426,7 @@ static const struct wmi_ops wmi_10_2_ops = {
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
/* .gen_pdev_enable_adaptive_cca not implemented */
};
static const struct wmi_ops wmi_10_2_4_ops = {
......@@ -7251,6 +7490,8 @@ static const struct wmi_ops wmi_10_2_4_ops = {
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.gen_pdev_get_tpc_config = ath10k_wmi_10_2_4_op_gen_pdev_get_tpc_config,
.fw_stats_fill = ath10k_wmi_10x_op_fw_stats_fill,
.gen_pdev_enable_adaptive_cca =
ath10k_wmi_op_gen_pdev_enable_adaptive_cca,
/* .gen_bcn_tmpl not implemented */
/* .gen_prb_tmpl not implemented */
/* .gen_p2p_go_bcn_ie not implemented */
......@@ -7261,6 +7502,7 @@ static const struct wmi_ops wmi_10_4_ops = {
.rx = ath10k_wmi_10_4_op_rx,
.map_svc = wmi_10_4_svc_map,
.pull_fw_stats = ath10k_wmi_10_4_op_pull_fw_stats,
.pull_scan = ath10k_wmi_op_pull_scan_ev,
.pull_mgmt_rx = ath10k_wmi_10_4_op_pull_mgmt_rx_ev,
.pull_ch_info = ath10k_wmi_10_4_op_pull_ch_info_ev,
......@@ -7310,9 +7552,11 @@ static const struct wmi_ops wmi_10_4_ops = {
.gen_addba_send = ath10k_wmi_op_gen_addba_send,
.gen_addba_set_resp = ath10k_wmi_op_gen_addba_set_resp,
.gen_delba_send = ath10k_wmi_op_gen_delba_send,
.fw_stats_fill = ath10k_wmi_10_4_op_fw_stats_fill,
/* shared with 10.2 */
.gen_peer_assoc = ath10k_wmi_10_2_op_gen_peer_assoc,
.gen_request_stats = ath10k_wmi_op_gen_request_stats,
};
int ath10k_wmi_attach(struct ath10k *ar)
......
......@@ -772,6 +772,7 @@ struct wmi_cmd_map {
u32 mu_cal_start_cmdid;
u32 set_cca_params_cmdid;
u32 pdev_bss_chan_info_request_cmdid;
u32 pdev_enable_adaptive_cca_cmdid;
};
/*
......@@ -1381,6 +1382,9 @@ enum wmi_10_2_cmd_id {
WMI_10_2_VDEV_ATF_REQUEST_CMDID,
WMI_10_2_PEER_ATF_REQUEST_CMDID,
WMI_10_2_PDEV_GET_TEMPERATURE_CMDID,
WMI_10_2_MU_CAL_START_CMDID,
WMI_10_2_SET_LTEU_CONFIG_CMDID,
WMI_10_2_SET_CCA_PARAMS,
WMI_10_2_PDEV_UTF_CMDID = WMI_10_2_END_CMDID - 1,
};
......@@ -3862,6 +3866,111 @@ struct wmi_pdev_stats_tx {
__le32 txop_ovf;
} __packed;
struct wmi_10_4_pdev_stats_tx {
/* Num HTT cookies queued to dispatch list */
__le32 comp_queued;
/* Num HTT cookies dispatched */
__le32 comp_delivered;
/* Num MSDU queued to WAL */
__le32 msdu_enqued;
/* Num MPDU queue to WAL */
__le32 mpdu_enqued;
/* Num MSDUs dropped by WMM limit */
__le32 wmm_drop;
/* Num Local frames queued */
__le32 local_enqued;
/* Num Local frames done */
__le32 local_freed;
/* Num queued to HW */
__le32 hw_queued;
/* Num PPDU reaped from HW */
__le32 hw_reaped;
/* Num underruns */
__le32 underrun;
/* HW Paused. */
__le32 hw_paused;
/* Num PPDUs cleaned up in TX abort */
__le32 tx_abort;
/* Num MPDUs requed by SW */
__le32 mpdus_requed;
/* excessive retries */
__le32 tx_ko;
/* data hw rate code */
__le32 data_rc;
/* Scheduler self triggers */
__le32 self_triggers;
/* frames dropped due to excessive sw retries */
__le32 sw_retry_failure;
/* illegal rate phy errors */
__le32 illgl_rate_phy_err;
/* wal pdev continuous xretry */
__le32 pdev_cont_xretry;
/* wal pdev tx timeouts */
__le32 pdev_tx_timeout;
/* wal pdev resets */
__le32 pdev_resets;
/* frames dropped due to non-availability of stateless TIDs */
__le32 stateless_tid_alloc_failure;
__le32 phy_underrun;
/* MPDU is more than txop limit */
__le32 txop_ovf;
/* Number of Sequences posted */
__le32 seq_posted;
/* Number of Sequences failed queueing */
__le32 seq_failed_queueing;
/* Number of Sequences completed */
__le32 seq_completed;
/* Number of Sequences restarted */
__le32 seq_restarted;
/* Number of MU Sequences posted */
__le32 mu_seq_posted;
/* Num MPDUs flushed by SW, HWPAUSED, SW TXABORT (Reset, channel change) */
__le32 mpdus_sw_flush;
/* Num MPDUs filtered by HW, all filter conditions (TTL expired) */
__le32 mpdus_hw_filter;
/* Num MPDUs truncated by PDG
* (TXOP, TBTT, PPDU_duration based on rate, dyn_bw)
*/
__le32 mpdus_truncated;
/* Num MPDUs that were tried but didn't receive an ACK or BA */
__le32 mpdus_ack_failed;
/* Num MPDUs that were dropped due to expiry. */
__le32 mpdus_expired;
} __packed;
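/* Compared with wmi_pdev_stats_tx, this 10.4 layout adds hw_paused
* after underrun and appends the seq_ and mpdus_ counters, which is
* why ath10k_wmi_10_4_pull_pdev_stats_tx() above carries its own
* conversion instead of reusing ath10k_wmi_pull_pdev_stats_tx().
*/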
struct wmi_pdev_stats_rx {
/* Cnts any change in ring routing mid-ppdu */
__le32 mid_ppdu_route_change;
......@@ -4035,6 +4144,16 @@ struct wmi_10_2_pdev_stats {
struct wmi_pdev_stats_extra extra;
} __packed;
struct wmi_10_4_pdev_stats {
struct wmi_pdev_stats_base base;
struct wmi_10_4_pdev_stats_tx tx;
struct wmi_pdev_stats_rx rx;
__le32 rx_ovfl_errs;
struct wmi_pdev_stats_mem mem;
__le32 sram_free_size;
struct wmi_pdev_stats_extra extra;
} __packed;
/*
* VDEV statistics
* TODO: add all VDEV stats here
......@@ -4076,6 +4195,23 @@ struct wmi_10_2_4_peer_stats {
__le32 unknown_value; /* FIXME: what is this word? */
} __packed;
struct wmi_10_4_peer_stats {
struct wmi_mac_addr peer_macaddr;
__le32 peer_rssi;
__le32 peer_rssi_seq_num;
__le32 peer_tx_rate;
__le32 peer_rx_rate;
__le32 current_per;
__le32 retries;
__le32 tx_rate_count;
__le32 max_4ms_frame_len;
__le32 total_sub_frames;
__le32 tx_bytes;
__le32 num_pkt_loss_overflow[4];
__le32 num_pkt_loss_excess_retry[4];
__le32 peer_rssi_changed;
} __packed;
struct wmi_10_2_pdev_ext_stats {
__le32 rx_rssi_comb;
__le32 rx_rssi[4];
......@@ -6094,6 +6230,15 @@ enum wmi_txbf_conf {
WMI_TXBF_CONF_AFTER_ASSOC,
};
#define WMI_CCA_DETECT_LEVEL_AUTO 0
#define WMI_CCA_DETECT_MARGIN_AUTO 0
struct wmi_pdev_set_adaptive_cca_params {
__le32 enable;
__le32 cca_detect_level;
__le32 cca_detect_margin;
} __packed;
struct ath10k;
struct ath10k_vif;
struct ath10k_fw_stats_pdev;
......@@ -6188,5 +6333,8 @@ void ath10k_wmi_10x_op_fw_stats_fill(struct ath10k *ar,
char *buf);
size_t ath10k_wmi_fw_stats_num_peers(struct list_head *head);
size_t ath10k_wmi_fw_stats_num_vdevs(struct list_head *head);
void ath10k_wmi_10_4_op_fw_stats_fill(struct ath10k *ar,
struct ath10k_fw_stats *fw_stats,
char *buf);
#endif /* _WMI_H_ */
......@@ -3231,6 +3231,15 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
wait, buf, len, no_cck);
}
static int ath6kl_get_antenna(struct wiphy *wiphy,
u32 *tx_ant, u32 *rx_ant)
{
struct ath6kl *ar = wiphy_priv(wiphy);
*tx_ant = ar->hw.tx_ant;
*rx_ant = ar->hw.rx_ant;
return 0;
}
static void ath6kl_mgmt_frame_register(struct wiphy *wiphy,
struct wireless_dev *wdev,
u16 frame_type, bool reg)
......@@ -3447,6 +3456,7 @@ static struct cfg80211_ops ath6kl_cfg80211_ops = {
.cancel_remain_on_channel = ath6kl_cancel_remain_on_channel,
.mgmt_tx = ath6kl_mgmt_tx,
.mgmt_frame_register = ath6kl_mgmt_frame_register,
.get_antenna = ath6kl_get_antenna,
.sched_scan_start = ath6kl_cfg80211_sscan_start,
.sched_scan_stop = ath6kl_cfg80211_sscan_stop,
.set_bitrate_mask = ath6kl_cfg80211_set_bitrate,
......@@ -3634,6 +3644,127 @@ void ath6kl_cfg80211_vif_cleanup(struct ath6kl_vif *vif)
ar->num_vif--;
}
static const char ath6kl_gstrings_sta_stats[][ETH_GSTRING_LEN] = {
/* Common stats names used by many drivers. */
"tx_pkts_nic", "tx_bytes_nic", "rx_pkts_nic", "rx_bytes_nic",
/* TX stats. */
"d_tx_ucast_pkts", "d_tx_bcast_pkts",
"d_tx_ucast_bytes", "d_tx_bcast_bytes",
"d_tx_rts_ok", "d_tx_error", "d_tx_fail",
"d_tx_retry", "d_tx_multi_retry", "d_tx_rts_fail",
"d_tx_tkip_counter_measures",
/* RX Stats. */
"d_rx_ucast_pkts", "d_rx_ucast_rate", "d_rx_bcast_pkts",
"d_rx_ucast_bytes", "d_rx_bcast_bytes", "d_rx_frag_pkt",
"d_rx_error", "d_rx_crc_err", "d_rx_keycache_miss",
"d_rx_decrypt_crc_err", "d_rx_duplicate_frames",
"d_rx_mic_err", "d_rx_tkip_format_err", "d_rx_ccmp_format_err",
"d_rx_ccmp_replay_err",
/* Misc stats. */
"d_beacon_miss", "d_num_connects", "d_num_disconnects",
"d_beacon_avg_rssi", "d_arp_received", "d_arp_matched",
"d_arp_replied"
};
#define ATH6KL_STATS_LEN ARRAY_SIZE(ath6kl_gstrings_sta_stats)
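/* ethtool pairs each string above with the u64 written at the same
* index by ath6kl_get_stats() below, so the name table and the fill
* order must stay in sync; the WARN_ON_ONCE in ath6kl_get_stats()
* catches a length mismatch.
*/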
static int ath6kl_get_sset_count(struct net_device *dev, int sset)
{
int rv = 0;
if (sset == ETH_SS_STATS)
rv += ATH6KL_STATS_LEN;
if (rv == 0)
return -EOPNOTSUPP;
return rv;
}
static void ath6kl_get_stats(struct net_device *dev,
struct ethtool_stats *stats,
u64 *data)
{
struct ath6kl_vif *vif = netdev_priv(dev);
struct ath6kl *ar = vif->ar;
int i = 0;
struct target_stats *tgt_stats;
memset(data, 0, sizeof(u64) * ATH6KL_STATS_LEN);
ath6kl_read_tgt_stats(ar, vif);
tgt_stats = &vif->target_stats;
data[i++] = tgt_stats->tx_ucast_pkt + tgt_stats->tx_bcast_pkt;
data[i++] = tgt_stats->tx_ucast_byte + tgt_stats->tx_bcast_byte;
data[i++] = tgt_stats->rx_ucast_pkt + tgt_stats->rx_bcast_pkt;
data[i++] = tgt_stats->rx_ucast_byte + tgt_stats->rx_bcast_byte;
data[i++] = tgt_stats->tx_ucast_pkt;
data[i++] = tgt_stats->tx_bcast_pkt;
data[i++] = tgt_stats->tx_ucast_byte;
data[i++] = tgt_stats->tx_bcast_byte;
data[i++] = tgt_stats->tx_rts_success_cnt;
data[i++] = tgt_stats->tx_err;
data[i++] = tgt_stats->tx_fail_cnt;
data[i++] = tgt_stats->tx_retry_cnt;
data[i++] = tgt_stats->tx_mult_retry_cnt;
data[i++] = tgt_stats->tx_rts_fail_cnt;
data[i++] = tgt_stats->tkip_cnter_measures_invoked;
data[i++] = tgt_stats->rx_ucast_pkt;
data[i++] = tgt_stats->rx_ucast_rate;
data[i++] = tgt_stats->rx_bcast_pkt;
data[i++] = tgt_stats->rx_ucast_byte;
data[i++] = tgt_stats->rx_bcast_byte;
data[i++] = tgt_stats->rx_frgment_pkt;
data[i++] = tgt_stats->rx_err;
data[i++] = tgt_stats->rx_crc_err;
data[i++] = tgt_stats->rx_key_cache_miss;
data[i++] = tgt_stats->rx_decrypt_err;
data[i++] = tgt_stats->rx_dupl_frame;
data[i++] = tgt_stats->tkip_local_mic_fail;
data[i++] = tgt_stats->tkip_fmt_err;
data[i++] = tgt_stats->ccmp_fmt_err;
data[i++] = tgt_stats->ccmp_replays;
data[i++] = tgt_stats->cs_bmiss_cnt;
data[i++] = tgt_stats->cs_connect_cnt;
data[i++] = tgt_stats->cs_discon_cnt;
data[i++] = tgt_stats->cs_ave_beacon_rssi;
data[i++] = tgt_stats->arp_received;
data[i++] = tgt_stats->arp_matched;
data[i++] = tgt_stats->arp_replied;
if (i != ATH6KL_STATS_LEN) {
WARN_ON_ONCE(1);
ath6kl_err("ethtool stats error, i: %d STATS_LEN: %d\n",
i, (int)ATH6KL_STATS_LEN);
}
}
/* These stats are per NIC, not really per vdev, so we just ignore dev. */
static void ath6kl_get_strings(struct net_device *dev, u32 sset, u8 *data)
{
int sz_sta_stats = 0;
if (sset == ETH_SS_STATS) {
sz_sta_stats = sizeof(ath6kl_gstrings_sta_stats);
memcpy(data, ath6kl_gstrings_sta_stats, sz_sta_stats);
}
}
static const struct ethtool_ops ath6kl_ethtool_ops = {
.get_drvinfo = cfg80211_get_drvinfo,
.get_link = ethtool_op_get_link,
.get_strings = ath6kl_get_strings,
.get_ethtool_stats = ath6kl_get_stats,
.get_sset_count = ath6kl_get_sset_count,
};
struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
......@@ -3679,6 +3810,8 @@ struct wireless_dev *ath6kl_interface_add(struct ath6kl *ar, const char *name,
if (ath6kl_cfg80211_vif_init(vif))
goto err;
netdev_set_default_ethtool_ops(ndev, &ath6kl_ethtool_ops);
if (register_netdevice(ndev))
goto err;
......@@ -3786,6 +3919,9 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
ath6kl_band_2ghz.ht_cap.ht_supported = false;
ath6kl_band_5ghz.ht_cap.cap = 0;
ath6kl_band_5ghz.ht_cap.ht_supported = false;
if (ht)
ath6kl_err("Firmware lacks RSN-CAP-OVERRIDE, so HT (802.11n) is disabled.");
}
if (test_bit(ATH6KL_FW_CAPABILITY_64BIT_RATES,
......@@ -3794,11 +3930,18 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
ath6kl_band_2ghz.ht_cap.mcs.rx_mask[1] = 0xff;
ath6kl_band_5ghz.ht_cap.mcs.rx_mask[1] = 0xff;
ar->hw.tx_ant = 2;
ar->hw.rx_ant = 2;
} else {
ath6kl_band_2ghz.ht_cap.mcs.rx_mask[0] = 0xff;
ath6kl_band_5ghz.ht_cap.mcs.rx_mask[0] = 0xff;
ar->hw.tx_ant = 1;
ar->hw.rx_ant = 1;
}
wiphy->available_antennas_tx = ar->hw.tx_ant;
wiphy->available_antennas_rx = ar->hw.rx_ant;
if (band_2gig)
wiphy->bands[IEEE80211_BAND_2GHZ] = &ath6kl_band_2ghz;
if (band_5gig)
......
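/*
 * Note (added for clarity, not part of the patch): cfg80211 reports
 * wiphy->available_antennas_tx/rx to user space via nl80211
 * (NL80211_ATTR_WIPHY_ANTENNA_AVAIL_TX/RX), so after this change tools such
 * as "iw phy" can display the antenna configuration advertised by the
 * firmware.
 */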
......@@ -782,6 +782,8 @@ struct ath6kl {
u32 refclk_hz;
u32 uarttx_pin;
u32 testscript_addr;
u8 tx_ant;
u8 rx_ant;
enum wmi_phy_cap cap;
u32 flags;
......
......@@ -98,6 +98,33 @@ void ath6kl_warn(const char *fmt, ...)
}
EXPORT_SYMBOL(ath6kl_warn);
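/*
 * Descriptive comment (added here for clarity; not part of the original
 * patch): ath6kl_read_tgt_stats() serializes on ar->sem, issues a WMI
 * statistics request via ath6kl_wmi_get_stats_cmd() and sleeps until the
 * stats event handler clears STATS_UPDATE_PEND or WMI_TIMEOUT expires.
 * Returns 0 on success, -EBUSY if interrupted while taking the semaphore,
 * -EIO if the WMI command could not be sent, or -ETIMEDOUT if no stats
 * event arrived in time.
 */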
int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif)
{
long left;
if (down_interruptible(&ar->sem))
return -EBUSY;
set_bit(STATS_UPDATE_PEND, &vif->flags);
if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
up(&ar->sem);
return -EIO;
}
left = wait_event_interruptible_timeout(ar->event_wq,
!test_bit(STATS_UPDATE_PEND,
&vif->flags), WMI_TIMEOUT);
up(&ar->sem);
if (left <= 0)
return -ETIMEDOUT;
return 0;
}
EXPORT_SYMBOL(ath6kl_read_tgt_stats);
#ifdef CONFIG_ATH6KL_DEBUG
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...)
......@@ -544,42 +571,24 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
char *buf;
unsigned int len = 0, buf_len = 1500;
int i;
long left;
ssize_t ret_cnt;
int rv;
vif = ath6kl_vif_first(ar);
if (!vif)
return -EIO;
tgt_stats = &vif->target_stats;
buf = kzalloc(buf_len, GFP_KERNEL);
if (!buf)
return -ENOMEM;
if (down_interruptible(&ar->sem)) {
rv = ath6kl_read_tgt_stats(ar, vif);
if (rv < 0) {
kfree(buf);
return -EBUSY;
return rv;
}
set_bit(STATS_UPDATE_PEND, &vif->flags);
if (ath6kl_wmi_get_stats_cmd(ar->wmi, 0)) {
up(&ar->sem);
kfree(buf);
return -EIO;
}
left = wait_event_interruptible_timeout(ar->event_wq,
!test_bit(STATS_UPDATE_PEND,
&vif->flags), WMI_TIMEOUT);
up(&ar->sem);
if (left <= 0) {
kfree(buf);
return -ETIMEDOUT;
}
tgt_stats = &vif->target_stats;
len += scnprintf(buf + len, buf_len - len, "\n");
len += scnprintf(buf + len, buf_len - len, "%25s\n",
......
......@@ -59,6 +59,8 @@ enum ath6kl_war {
ATH6KL_WAR_INVALID_RATE,
};
int ath6kl_read_tgt_stats(struct ath6kl *ar, struct ath6kl_vif *vif);
#ifdef CONFIG_ATH6KL_DEBUG
void ath6kl_dbg(enum ATH6K_DEBUG_MASK mask, const char *fmt, ...);
......
......@@ -994,7 +994,7 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
switch (ie_id) {
case ATH6KL_FW_IE_FW_VERSION:
strlcpy(ar->wiphy->fw_version, data,
sizeof(ar->wiphy->fw_version));
min(sizeof(ar->wiphy->fw_version), ie_len+1));
ath6kl_dbg(ATH6KL_DBG_BOOT,
"found fw version %s\n",
......
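/*
 * For illustration only (hypothetical helper, not from the driver): the
 * min(sizeof(fw_version), ie_len + 1) clamp above ensures the copy never
 * takes more bytes from the firmware IE than the IE actually carries, while
 * still truncating to the destination buffer and NUL-terminating. The same
 * intent expressed as a standalone helper:
 */
#include <string.h>

static void copy_ie_string(char *dst, size_t dst_sz,
			   const char *ie_data, size_t ie_len)
{
	size_t n;

	if (!dst_sz)
		return;
	n = ie_len < dst_sz - 1 ? ie_len : dst_sz - 1;
	memcpy(dst, ie_data, n);	/* never read past the IE payload */
	dst[n] = '\0';			/* always NUL-terminate */
}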
......@@ -236,7 +236,7 @@ static irqreturn_t wil6210_irq_rx(int irq, void *cookie)
isr &= ~(BIT_DMA_EP_RX_ICR_RX_DONE |
BIT_DMA_EP_RX_ICR_RX_HTRSH);
if (likely(test_bit(wil_status_reset_done, wil->status))) {
if (likely(test_bit(wil_status_fwready, wil->status))) {
if (likely(test_bit(wil_status_napi_en, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Rx) schedule\n");
need_unmask = false;
......@@ -286,7 +286,7 @@ static irqreturn_t wil6210_irq_tx(int irq, void *cookie)
isr &= ~BIT_DMA_EP_TX_ICR_TX_DONE;
/* clear also all VRING interrupts */
isr &= ~(BIT(25) - 1UL);
if (likely(test_bit(wil_status_reset_done, wil->status))) {
if (likely(test_bit(wil_status_fwready, wil->status))) {
wil_dbg_txrx(wil, "NAPI(Tx) schedule\n");
need_unmask = false;
napi_schedule(&wil->napi_tx);
......@@ -364,7 +364,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
if (isr & ISR_MISC_FW_READY) {
wil_dbg_irq(wil, "IRQ: FW ready\n");
wil_cache_mbox_regs(wil);
set_bit(wil_status_reset_done, wil->status);
set_bit(wil_status_mbox_ready, wil->status);
/**
* Actual FW ready indicated by the
* WMI_FW_READY_EVENTID
......
......@@ -422,7 +422,7 @@ static void wil_connect_worker(struct work_struct *work)
wil->sta[cid].status = wil_sta_connected;
netif_tx_wake_all_queues(ndev);
} else {
wil->sta[cid].status = wil_sta_unused;
wil_disconnect_cid(wil, cid, WLAN_REASON_UNSPECIFIED, true);
}
}
......
......@@ -1242,6 +1242,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
int tcp_hdr_len;
int skb_net_hdr_len;
int gso_type;
int rc = -EINVAL;
wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
__func__, skb->len, vring_index);
......@@ -1333,8 +1334,9 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
len, rem_data, descs_used);
if (descs_used == avail) {
wil_err(wil, "TSO: ring overflow\n");
goto dma_error;
wil_err_ratelimited(wil, "TSO: ring overflow\n");
rc = -ENOMEM;
goto mem_error;
}
lenmss = min_t(int, rem_data, len);
......@@ -1356,8 +1358,10 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
headlen -= lenmss;
}
if (unlikely(dma_mapping_error(dev, pa)))
goto dma_error;
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "TSO: DMA map page error\n");
goto mem_error;
}
_desc = &vring->va[i].tx;
......@@ -1456,8 +1460,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
}
/* advance swhead */
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
wil_vring_advance_head(vring, descs_used);
wil_dbg_txrx(wil, "TSO: Tx swhead %d -> %d\n", swhead, vring->swhead);
/* make sure all writes to descriptors (shared memory) are done before
* committing them to HW
......@@ -1467,8 +1471,7 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
wil_w(wil, vring->hwtail, vring->swhead);
return 0;
dma_error:
wil_err(wil, "TSO: DMA map page error\n");
mem_error:
while (descs_used > 0) {
struct wil_ctx *ctx;
......@@ -1479,14 +1482,11 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
_desc->dma.status = TX_DMA_STATUS_DU;
ctx = &vring->ctx[i];
wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
memset(ctx, 0, sizeof(*ctx));
descs_used--;
}
err_exit:
return -EINVAL;
return rc;
}
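/*
 * Generic sketch (not the wil6210 code; the function name is made up) of how
 * the distinct return codes introduced above are useful one layer up: the
 * ndo_start_xmit path can presumably map "ring full" (-ENOMEM) to
 * NETDEV_TX_BUSY so the stack requeues the skb, while other errors drop it.
 */
static netdev_tx_t tx_rc_to_netdev_tx(struct net_device *ndev,
				       struct sk_buff *skb, int rc)
{
	switch (rc) {
	case 0:
		return NETDEV_TX_OK;		/* queued; freed on completion */
	case -ENOMEM:
		return NETDEV_TX_BUSY;		/* ring full: let the stack retry */
	default:
		ndev->stats.tx_dropped++;	/* unrecoverable: drop the frame */
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}
}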
static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
......@@ -1562,8 +1562,11 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
_d = &vring->va[i].tx;
pa = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(dev, pa)))
if (unlikely(dma_mapping_error(dev, pa))) {
wil_err(wil, "Tx[%2d] failed to map fragment\n",
vring_index);
goto dma_error;
}
vring->ctx[i].mapped_as = wil_mapped_as_page;
wil_tx_desc_map(d, pa, len, vring_index);
/* no need to check return code -
......@@ -1623,9 +1626,6 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
_d->dma.status = TX_DMA_STATUS_DU;
wil_txdesc_unmap(dev, d, ctx);
if (ctx->skb)
dev_kfree_skb_any(ctx->skb);
memset(ctx, 0, sizeof(*ctx));
}
......
......@@ -402,11 +402,11 @@ struct vring_tx_data {
};
enum { /* for wil6210_priv.status */
wil_status_fwready = 0,
wil_status_fwready = 0, /* FW operational */
wil_status_fwconnecting,
wil_status_fwconnected,
wil_status_dontscan,
wil_status_reset_done,
wil_status_mbox_ready, /* MBOX structures ready */
wil_status_irqen, /* FIXME: interrupts enabled - for debug */
wil_status_napi_en, /* NAPI enabled protected by wil->mutex */
wil_status_resetting, /* reset in progress */
......
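/*
 * Note (not part of the patch): these enum values are bit numbers, not masks.
 * wil->status is a bitmap (declared with DECLARE_BITMAP() in the driver), so
 * the flags above are queried and updated atomically, e.g.:
 *
 *	set_bit(wil_status_mbox_ready, wil->status);
 *	if (test_bit(wil_status_fwready, wil->status))
 *		...
 *
 * which is why renaming wil_status_reset_done to wil_status_mbox_ready only
 * changes the symbolic name, not the storage layout.
 */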
......@@ -293,12 +293,6 @@ static void wmi_evt_ready(struct wil6210_priv *wil, int id, void *d, int len)
/* ignore MAC address, we already have it from the boot loader */
snprintf(wdev->wiphy->fw_version, sizeof(wdev->wiphy->fw_version),
"%d", wil->fw_version);
}
static void wmi_evt_fw_ready(struct wil6210_priv *wil, int id, void *d,
int len)
{
wil_dbg_wmi(wil, "WMI: got FW ready event\n");
wil_set_recovery_state(wil, fw_recovery_idle);
set_bit(wil_status_fwready, wil->status);
......@@ -684,13 +678,22 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
spin_unlock_bh(&sta->tid_rx_lock);
}
/**
* Some events are ignored on purpose and need not be reported as
* "unhandled events"
*/
static void wmi_evt_ignore(struct wil6210_priv *wil, int id, void *d, int len)
{
wil_dbg_wmi(wil, "Ignore event 0x%04x len %d\n", id, len);
}
static const struct {
int eventid;
void (*handler)(struct wil6210_priv *wil, int eventid,
void *data, int data_len);
} wmi_evt_handlers[] = {
{WMI_READY_EVENTID, wmi_evt_ready},
{WMI_FW_READY_EVENTID, wmi_evt_fw_ready},
{WMI_FW_READY_EVENTID, wmi_evt_ignore},
{WMI_RX_MGMT_PACKET_EVENTID, wmi_evt_rx_mgmt},
{WMI_TX_MGMT_PACKET_EVENTID, wmi_evt_tx_mgmt},
{WMI_SCAN_COMPLETE_EVENTID, wmi_evt_scan_complete},
......@@ -701,6 +704,7 @@ static const struct {
{WMI_RCP_ADDBA_REQ_EVENTID, wmi_evt_addba_rx_req},
{WMI_DELBA_EVENTID, wmi_evt_delba},
{WMI_VRING_EN_EVENTID, wmi_evt_vring_en},
{WMI_DATA_PORT_OPEN_EVENTID, wmi_evt_ignore},
};
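/*
 * For reference, a minimal sketch (hypothetical helper; the real lookup lives
 * in the WMI receive path and is not part of this hunk) of how the table is
 * consumed: linear search by event id. Events mapped to wmi_evt_ignore are
 * therefore "handled" and stay out of the unhandled-event warning.
 */
static bool wmi_evt_dispatch(struct wil6210_priv *wil, int id,
			     void *data, int len)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(wmi_evt_handlers); i++) {
		if (wmi_evt_handlers[i].eventid == id) {
			wmi_evt_handlers[i].handler(wil, id, data, len);
			return true;
		}
	}

	return false;	/* caller logs the unhandled event id */
}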
/*
......@@ -720,7 +724,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
ulong flags;
unsigned n;
if (!test_bit(wil_status_reset_done, wil->status)) {
if (!test_bit(wil_status_mbox_ready, wil->status)) {
wil_err(wil, "Reset in progress. Cannot handle WMI event\n");
return;
}
......