Commit c8ac18f2 authored by David S. Miller

Merge tag 'wireless-drivers-next-for-davem-2015-02-07' of...

Merge tag 'wireless-drivers-next-for-davem-2015-02-07' of git://git.kernel.org/pub/scm/linux/kernel/git/kvalo/wireless-drivers-next

Major changes:

iwlwifi:

* more work for new devices (4165 / 8260)
* cleanups / improvements in rate control
* fixes for TDLS
* major statistics work from Johannes - more to come
* improvements for the fw error dump infrastructure
* usual amount of small fixes here and there (scan, D0i3, etc.)
* add support for beamforming
* enable stuck queue detection for iwlmvm
* a few fixes for EBS scan
* fixes for various failure paths
* improvements for TDLS Offchannel

wil6210:

* performance tuning
* some AP features

brcm80211:

* rework code in the SDIO part of the brcmfmac driver related to
  suspend/resume issues found during stress testing
* relax scheduling of the worker thread in the PCIe part
* minor fixes and exposing firmware revision information to
  user-space, i.e. via ethtool

mwifiex:

* enhancements to virtual interface change handling
* remove coupling between netdev and FW-supported interface
  combinations; conversion from any supported interface type to any
  other type is now possible
* DFS support in AP mode

ath9k:

* fix calibration issues on some boards
* Wake-on-WLAN improvements

ath10k:

* add support for qca6174 hardware
* enable RX batching to reduce CPU load

Conflicts:
	drivers/net/wireless/rtlwifi/pci.c

Conflict resolution is to get rid of the 'end' label and keep
the rest.
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 93c1af6c d5307114
...@@ -107,6 +107,14 @@ extern int bcma_chipco_watchdog_register(struct bcma_drv_cc *cc);
 #ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
 bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc);
 void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc);
+#else
+static inline bool bcma_core_pci_is_in_hostmode(struct bcma_drv_pci *pc)
+{
+	return false;
+}
+static inline void bcma_core_pci_hostmode_init(struct bcma_drv_pci *pc)
+{
+}
 #endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */

 #ifdef CONFIG_BCMA_DRIVER_GPIO
......
...@@ -178,7 +178,6 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc) ...@@ -178,7 +178,6 @@ void bcma_core_chipcommon_init(struct bcma_drv_cc *cc)
u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks) u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
{ {
u32 maxt; u32 maxt;
enum bcma_clkmode clkmode;
maxt = bcma_chipco_watchdog_get_max_timer(cc); maxt = bcma_chipco_watchdog_get_max_timer(cc);
if (cc->capabilities & BCMA_CC_CAP_PMU) { if (cc->capabilities & BCMA_CC_CAP_PMU) {
...@@ -188,8 +187,13 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks) ...@@ -188,8 +187,13 @@ u32 bcma_chipco_watchdog_timer_set(struct bcma_drv_cc *cc, u32 ticks)
ticks = maxt; ticks = maxt;
bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks); bcma_cc_write32(cc, BCMA_CC_PMU_WATCHDOG, ticks);
} else { } else {
clkmode = ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC; struct bcma_bus *bus = cc->core->bus;
bcma_core_set_clockmode(cc->core, clkmode);
if (bus->chipinfo.id != BCMA_CHIP_ID_BCM4707 &&
bus->chipinfo.id != BCMA_CHIP_ID_BCM53018)
bcma_core_set_clockmode(cc->core,
ticks ? BCMA_CLKMODE_FAST : BCMA_CLKMODE_DYNAMIC);
if (ticks > maxt) if (ticks > maxt)
ticks = maxt; ticks = maxt;
/* instant NMI */ /* instant NMI */
......
...@@ -144,6 +144,47 @@ static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device, ...@@ -144,6 +144,47 @@ static u16 bcma_pcie_mdio_writeread(struct bcma_drv_pci *pc, u16 device,
return bcma_pcie_mdio_read(pc, device, address); return bcma_pcie_mdio_read(pc, device, address);
} }
/**************************************************
* Early init.
**************************************************/
static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
{
struct bcma_device *core = pc->core;
u16 val16, core_index;
uint regoff;
regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
core_index = (u16)core->core_index;
val16 = pcicore_read16(pc, regoff);
if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
!= core_index) {
val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
pcicore_write16(pc, regoff, val16);
}
}
/*
* Apply some early fixes required before accessing SPROM.
* See also si_pci_fixcfg.
*/
void bcma_core_pci_early_init(struct bcma_drv_pci *pc)
{
if (pc->early_setup_done)
return;
pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
if (pc->hostmode)
goto out;
bcma_core_pci_fixcfg(pc);
out:
pc->early_setup_done = true;
}
/************************************************** /**************************************************
* Workarounds. * Workarounds.
**************************************************/ **************************************************/
...@@ -175,24 +216,6 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc) ...@@ -175,24 +216,6 @@ static void bcma_pcicore_serdes_workaround(struct bcma_drv_pci *pc)
tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN); tmp & ~BCMA_CORE_PCI_PLL_CTRL_FREQDET_EN);
} }
static void bcma_core_pci_fixcfg(struct bcma_drv_pci *pc)
{
struct bcma_device *core = pc->core;
u16 val16, core_index;
uint regoff;
regoff = BCMA_CORE_PCI_SPROM(BCMA_CORE_PCI_SPROM_PI_OFFSET);
core_index = (u16)core->core_index;
val16 = pcicore_read16(pc, regoff);
if (((val16 & BCMA_CORE_PCI_SPROM_PI_MASK) >> BCMA_CORE_PCI_SPROM_PI_SHIFT)
!= core_index) {
val16 = (core_index << BCMA_CORE_PCI_SPROM_PI_SHIFT) |
(val16 & ~BCMA_CORE_PCI_SPROM_PI_MASK);
pcicore_write16(pc, regoff, val16);
}
}
/* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */ /* Fix MISC config to allow coming out of L2/L3-Ready state w/o PRST */
/* Needs to happen when coming out of 'standby'/'hibernate' */ /* Needs to happen when coming out of 'standby'/'hibernate' */
static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc) static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
...@@ -216,7 +239,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc) ...@@ -216,7 +239,6 @@ static void bcma_core_pci_config_fixup(struct bcma_drv_pci *pc)
static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc) static void bcma_core_pci_clientmode_init(struct bcma_drv_pci *pc)
{ {
bcma_core_pci_fixcfg(pc);
bcma_pcicore_serdes_workaround(pc); bcma_pcicore_serdes_workaround(pc);
bcma_core_pci_config_fixup(pc); bcma_core_pci_config_fixup(pc);
} }
...@@ -226,13 +248,11 @@ void bcma_core_pci_init(struct bcma_drv_pci *pc)
 	if (pc->setup_done)
 		return;

-#ifdef CONFIG_BCMA_DRIVER_PCI_HOSTMODE
-	pc->hostmode = bcma_core_pci_is_in_hostmode(pc);
+	bcma_core_pci_early_init(pc);
+
 	if (pc->hostmode)
 		bcma_core_pci_hostmode_init(pc);
-#endif /* CONFIG_BCMA_DRIVER_PCI_HOSTMODE */
-
-	if (!pc->hostmode)
+	else
 		bcma_core_pci_clientmode_init(pc);
 }
......
...@@ -13,10 +13,12 @@
 static void bcma_host_pci_switch_core(struct bcma_device *core)
 {
+	int win2 = core->bus->host_is_pcie2 ?
+		BCMA_PCIE2_BAR0_WIN2 : BCMA_PCI_BAR0_WIN2;
+
 	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN,
 			       core->addr);
-	pci_write_config_dword(core->bus->host_pci, BCMA_PCI_BAR0_WIN2,
-			       core->wrap);
+	pci_write_config_dword(core->bus->host_pci, win2, core->wrap);
 	core->bus->mapped_core = core;
 	bcma_debug(core->bus, "Switched to core: 0x%X\n", core->id.id);
 }
......
...@@ -368,12 +368,19 @@ static void bcma_unregister_cores(struct bcma_bus *bus)
 	struct bcma_device *core, *tmp;

 	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+		if (!core->dev_registered)
+			continue;
 		list_del(&core->list);
-		if (core->dev_registered)
-			device_unregister(&core->dev);
+		device_unregister(&core->dev);
 	}
 	if (bus->hosttype == BCMA_HOSTTYPE_SOC)
 		platform_device_unregister(bus->drv_cc.watchdog);
+
+	/* Now noone uses internally-handled cores, we can free them */
+	list_for_each_entry_safe(core, tmp, &bus->cores, list) {
+		list_del(&core->list);
+		kfree(core);
+	}
 }

 int bcma_bus_register(struct bcma_bus *bus)
...@@ -395,6 +402,13 @@ int bcma_bus_register(struct bcma_bus *bus) ...@@ -395,6 +402,13 @@ int bcma_bus_register(struct bcma_bus *bus)
bcma_core_chipcommon_early_init(&bus->drv_cc); bcma_core_chipcommon_early_init(&bus->drv_cc);
} }
/* Early init PCIE core */
core = bcma_find_core(bus, BCMA_CORE_PCIE);
if (core) {
bus->drv_pci[0].core = core;
bcma_core_pci_early_init(&bus->drv_pci[0]);
}
/* Cores providing flash access go before SPROM init */ /* Cores providing flash access go before SPROM init */
list_for_each_entry(core, &bus->cores, list) { list_for_each_entry(core, &bus->cores, list) {
if (bcma_is_core_needed_early(core->id.id)) if (bcma_is_core_needed_early(core->id.id))
...@@ -467,7 +481,6 @@ int bcma_bus_register(struct bcma_bus *bus) ...@@ -467,7 +481,6 @@ int bcma_bus_register(struct bcma_bus *bus)
void bcma_bus_unregister(struct bcma_bus *bus) void bcma_bus_unregister(struct bcma_bus *bus)
{ {
struct bcma_device *cores[3];
int err; int err;
err = bcma_gpio_unregister(&bus->drv_cc); err = bcma_gpio_unregister(&bus->drv_cc);
...@@ -478,15 +491,7 @@ void bcma_bus_unregister(struct bcma_bus *bus) ...@@ -478,15 +491,7 @@ void bcma_bus_unregister(struct bcma_bus *bus)
bcma_core_chipcommon_b_free(&bus->drv_cc_b); bcma_core_chipcommon_b_free(&bus->drv_cc_b);
cores[0] = bcma_find_core(bus, BCMA_CORE_MIPS_74K);
cores[1] = bcma_find_core(bus, BCMA_CORE_PCIE);
cores[2] = bcma_find_core(bus, BCMA_CORE_4706_MAC_GBIT_COMMON);
bcma_unregister_cores(bus); bcma_unregister_cores(bus);
kfree(cores[2]);
kfree(cores[1]);
kfree(cores[0]);
} }
/* /*
......
...@@ -579,7 +579,8 @@ int bcma_sprom_get(struct bcma_bus *bus) ...@@ -579,7 +579,8 @@ int bcma_sprom_get(struct bcma_bus *bus)
u16 offset = BCMA_CC_SPROM; u16 offset = BCMA_CC_SPROM;
u16 *sprom; u16 *sprom;
size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4, size_t sprom_sizes[] = { SSB_SPROMSIZE_WORDS_R4,
SSB_SPROMSIZE_WORDS_R10, }; SSB_SPROMSIZE_WORDS_R10,
SSB_SPROMSIZE_WORDS_R11, };
int i, err = 0; int i, err = 0;
if (!bus->drv_cc.core) if (!bus->drv_cc.core)
......
...@@ -64,6 +64,7 @@ enum ath_op_flags { ...@@ -64,6 +64,7 @@ enum ath_op_flags {
ATH_OP_HW_RESET, ATH_OP_HW_RESET,
ATH_OP_SCANNING, ATH_OP_SCANNING,
ATH_OP_MULTI_CHANNEL, ATH_OP_MULTI_CHANNEL,
ATH_OP_WOW_ENABLED,
}; };
enum ath_bus_type { enum ath_bus_type {
......
...@@ -9,12 +9,14 @@ ath10k_core-y += mac.o \ ...@@ -9,12 +9,14 @@ ath10k_core-y += mac.o \
txrx.o \ txrx.o \
wmi.o \ wmi.o \
wmi-tlv.o \ wmi-tlv.o \
bmi.o bmi.o \
hw.o
ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o ath10k_core-$(CONFIG_ATH10K_DEBUGFS) += spectral.o
ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o ath10k_core-$(CONFIG_NL80211_TESTMODE) += testmode.o
ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o ath10k_core-$(CONFIG_ATH10K_TRACING) += trace.o
ath10k_core-$(CONFIG_THERMAL) += thermal.o ath10k_core-$(CONFIG_THERMAL) += thermal.o
ath10k_core-$(CONFIG_MAC80211_DEBUGFS) += debugfs_sta.o
obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o obj-$(CONFIG_ATH10K_PCI) += ath10k_pci.o
ath10k_pci-y += pci.o \ ath10k_pci-y += pci.o \
......
...@@ -803,7 +803,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar) ...@@ -803,7 +803,7 @@ int ath10k_ce_disable_interrupts(struct ath10k *ar)
int ce_id; int ce_id;
for (ce_id = 0; ce_id < CE_COUNT; ce_id++) { for (ce_id = 0; ce_id < CE_COUNT; ce_id++) {
u32 ctrl_addr = ath10k_ce_base_address(ce_id); u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr); ath10k_ce_copy_complete_intr_disable(ar, ctrl_addr);
ath10k_ce_error_intr_disable(ar, ctrl_addr); ath10k_ce_error_intr_disable(ar, ctrl_addr);
...@@ -832,7 +832,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar, ...@@ -832,7 +832,7 @@ static int ath10k_ce_init_src_ring(struct ath10k *ar,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce_ring *src_ring = ce_state->src_ring; struct ath10k_ce_ring *src_ring = ce_state->src_ring;
u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id); u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
nentries = roundup_pow_of_two(attr->src_nentries); nentries = roundup_pow_of_two(attr->src_nentries);
...@@ -869,7 +869,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar, ...@@ -869,7 +869,7 @@ static int ath10k_ce_init_dest_ring(struct ath10k *ar,
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id]; struct ath10k_ce_pipe *ce_state = &ar_pci->ce_states[ce_id];
struct ath10k_ce_ring *dest_ring = ce_state->dest_ring; struct ath10k_ce_ring *dest_ring = ce_state->dest_ring;
u32 nentries, ctrl_addr = ath10k_ce_base_address(ce_id); u32 nentries, ctrl_addr = ath10k_ce_base_address(ar, ce_id);
nentries = roundup_pow_of_two(attr->dest_nentries); nentries = roundup_pow_of_two(attr->dest_nentries);
...@@ -1051,7 +1051,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id, ...@@ -1051,7 +1051,7 @@ int ath10k_ce_init_pipe(struct ath10k *ar, unsigned int ce_id,
static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
{ {
u32 ctrl_addr = ath10k_ce_base_address(ce_id); u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_base_addr_set(ar, ctrl_addr, 0);
ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_src_ring_size_set(ar, ctrl_addr, 0);
...@@ -1061,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id) ...@@ -1061,7 +1061,7 @@ static void ath10k_ce_deinit_src_ring(struct ath10k *ar, unsigned int ce_id)
static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id) static void ath10k_ce_deinit_dest_ring(struct ath10k *ar, unsigned int ce_id)
{ {
u32 ctrl_addr = ath10k_ce_base_address(ce_id); u32 ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0); ath10k_ce_dest_ring_base_addr_set(ar, ctrl_addr, 0);
ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0); ath10k_ce_dest_ring_size_set(ar, ctrl_addr, 0);
...@@ -1098,7 +1098,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id, ...@@ -1098,7 +1098,7 @@ int ath10k_ce_alloc_pipe(struct ath10k *ar, int ce_id,
ce_state->ar = ar; ce_state->ar = ar;
ce_state->id = ce_id; ce_state->id = ce_id;
ce_state->ctrl_addr = ath10k_ce_base_address(ce_id); ce_state->ctrl_addr = ath10k_ce_base_address(ar, ce_id);
ce_state->attr_flags = attr->flags; ce_state->attr_flags = attr->flags;
ce_state->src_sz_max = attr->src_sz_max; ce_state->src_sz_max = attr->src_sz_max;
......
...@@ -394,7 +394,7 @@ struct ce_attr { ...@@ -394,7 +394,7 @@ struct ce_attr {
#define DST_WATERMARK_HIGH_RESET 0 #define DST_WATERMARK_HIGH_RESET 0
#define DST_WATERMARK_ADDRESS 0x0050 #define DST_WATERMARK_ADDRESS 0x0050
static inline u32 ath10k_ce_base_address(unsigned int ce_id) static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{ {
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id; return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
} }
......
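The extra struct ath10k *ar argument to ath10k_ce_base_address() is what makes room for chip-specific copy engine addresses: QCA988X and QCA6174 place their CE register blocks at different offsets (see the ath10k_hw_regs tables later in this series). A minimal sketch of how the helper could draw on the per-chip table, assuming the ce0_base_address/ce1_base_address fields from hw.h; the final form of the helper is not part of this hunk:

static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
	/* sketch: derive the CE register block from the per-chip table
	 * (ar->regs) instead of the fixed QCA988X CE0/CE1 constants
	 */
	return ar->regs->ce0_base_address +
	       (ar->regs->ce1_base_address -
		ar->regs->ce0_base_address) * ce_id;
}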
...@@ -57,6 +57,49 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = { ...@@ -57,6 +57,49 @@ static const struct ath10k_hw_params ath10k_hw_params_list[] = {
.board_ext_size = QCA988X_BOARD_EXT_DATA_SZ, .board_ext_size = QCA988X_BOARD_EXT_DATA_SZ,
}, },
}, },
{
.id = QCA6174_HW_2_1_VERSION,
.name = "qca6174 hw2.1",
.patch_load_addr = QCA6174_HW_2_1_PATCH_LOAD_ADDR,
.uart_pin = 6,
.fw = {
.dir = QCA6174_HW_2_1_FW_DIR,
.fw = QCA6174_HW_2_1_FW_FILE,
.otp = QCA6174_HW_2_1_OTP_FILE,
.board = QCA6174_HW_2_1_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
},
{
.id = QCA6174_HW_3_0_VERSION,
.name = "qca6174 hw3.0",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
.fw = {
.dir = QCA6174_HW_3_0_FW_DIR,
.fw = QCA6174_HW_3_0_FW_FILE,
.otp = QCA6174_HW_3_0_OTP_FILE,
.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
},
{
.id = QCA6174_HW_3_2_VERSION,
.name = "qca6174 hw3.2",
.patch_load_addr = QCA6174_HW_3_0_PATCH_LOAD_ADDR,
.uart_pin = 6,
.fw = {
/* uses same binaries as hw3.0 */
.dir = QCA6174_HW_3_0_FW_DIR,
.fw = QCA6174_HW_3_0_FW_FILE,
.otp = QCA6174_HW_3_0_OTP_FILE,
.board = QCA6174_HW_3_0_BOARD_DATA_FILE,
.board_size = QCA6174_BOARD_DATA_SZ,
.board_ext_size = QCA6174_BOARD_EXT_DATA_SZ,
},
},
}; };
static void ath10k_send_suspend_complete(struct ath10k *ar) static void ath10k_send_suspend_complete(struct ath10k *ar)
...@@ -927,6 +970,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar) ...@@ -927,6 +970,7 @@ static int ath10k_core_init_firmware_features(struct ath10k *ar)
case ATH10K_FW_WMI_OP_VERSION_TLV: case ATH10K_FW_WMI_OP_VERSION_TLV:
ar->max_num_peers = TARGET_TLV_NUM_PEERS; ar->max_num_peers = TARGET_TLV_NUM_PEERS;
ar->max_num_stations = TARGET_TLV_NUM_STATIONS; ar->max_num_stations = TARGET_TLV_NUM_STATIONS;
ar->max_num_vdevs = TARGET_TLV_NUM_VDEVS;
ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC; ar->htt.max_num_pending_tx = TARGET_TLV_NUM_MSDU_DESC;
break; break;
case ATH10K_FW_WMI_OP_VERSION_UNSET: case ATH10K_FW_WMI_OP_VERSION_UNSET:
...@@ -1060,6 +1104,18 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode) ...@@ -1060,6 +1104,18 @@ int ath10k_core_start(struct ath10k *ar, enum ath10k_firmware_mode mode)
goto err_hif_stop; goto err_hif_stop;
} }
/* If firmware indicates Full Rx Reorder support it must be used in a
* slightly different manner. Let HTT code know.
*/
ar->htt.rx_ring.in_ord_rx = !!(test_bit(WMI_SERVICE_RX_FULL_REORDER,
ar->wmi.svc_map));
status = ath10k_htt_rx_ring_refill(ar);
if (status) {
ath10k_err(ar, "failed to refill htt rx ring: %d\n", status);
goto err_hif_stop;
}
/* we don't care about HTT in UTF mode */ /* we don't care about HTT in UTF mode */
if (mode == ATH10K_FIRMWARE_MODE_NORMAL) { if (mode == ATH10K_FIRMWARE_MODE_NORMAL) {
status = ath10k_htt_setup(&ar->htt); status = ath10k_htt_setup(&ar->htt);
...@@ -1295,6 +1351,7 @@ EXPORT_SYMBOL(ath10k_core_unregister); ...@@ -1295,6 +1351,7 @@ EXPORT_SYMBOL(ath10k_core_unregister);
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus, enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
const struct ath10k_hif_ops *hif_ops) const struct ath10k_hif_ops *hif_ops)
{ {
struct ath10k *ar; struct ath10k *ar;
...@@ -1307,9 +1364,24 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, ...@@ -1307,9 +1364,24 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
ar->ath_common.priv = ar; ar->ath_common.priv = ar;
ar->ath_common.hw = ar->hw; ar->ath_common.hw = ar->hw;
ar->dev = dev; ar->dev = dev;
ar->hw_rev = hw_rev;
ar->hif.ops = hif_ops; ar->hif.ops = hif_ops;
ar->hif.bus = bus; ar->hif.bus = bus;
switch (hw_rev) {
case ATH10K_HW_QCA988X:
ar->regs = &qca988x_regs;
break;
case ATH10K_HW_QCA6174:
ar->regs = &qca6174_regs;
break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
hw_rev);
ret = -ENOTSUPP;
goto err_free_mac;
}
init_completion(&ar->scan.started); init_completion(&ar->scan.started);
init_completion(&ar->scan.completed); init_completion(&ar->scan.completed);
init_completion(&ar->scan.on_channel); init_completion(&ar->scan.on_channel);
......
...@@ -97,6 +97,11 @@ struct ath10k_skb_cb { ...@@ -97,6 +97,11 @@ struct ath10k_skb_cb {
} bcn; } bcn;
} __packed; } __packed;
struct ath10k_skb_rxcb {
dma_addr_t paddr;
struct hlist_node hlist;
};
static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
{ {
BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) > BUILD_BUG_ON(sizeof(struct ath10k_skb_cb) >
...@@ -104,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb) ...@@ -104,6 +109,15 @@ static inline struct ath10k_skb_cb *ATH10K_SKB_CB(struct sk_buff *skb)
return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data; return (struct ath10k_skb_cb *)&IEEE80211_SKB_CB(skb)->driver_data;
} }
static inline struct ath10k_skb_rxcb *ATH10K_SKB_RXCB(struct sk_buff *skb)
{
BUILD_BUG_ON(sizeof(struct ath10k_skb_rxcb) > sizeof(skb->cb));
return (struct ath10k_skb_rxcb *)skb->cb;
}
#define ATH10K_RXCB_SKB(rxcb) \
container_of((void *)rxcb, struct sk_buff, cb)
static inline u32 host_interest_item_address(u32 item_offset) static inline u32 host_interest_item_address(u32 item_offset)
{ {
return QCA988X_HOST_INTEREST_ADDRESS + item_offset; return QCA988X_HOST_INTEREST_ADDRESS + item_offset;
...@@ -239,10 +253,21 @@ struct ath10k_sta { ...@@ -239,10 +253,21 @@ struct ath10k_sta {
u32 smps; u32 smps;
struct work_struct update_wk; struct work_struct update_wk;
#ifdef CONFIG_MAC80211_DEBUGFS
/* protected by conf_mutex */
bool aggr_mode;
#endif
}; };
#define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ) #define ATH10K_VDEV_SETUP_TIMEOUT_HZ (5*HZ)
enum ath10k_beacon_state {
ATH10K_BEACON_SCHEDULED = 0,
ATH10K_BEACON_SENDING,
ATH10K_BEACON_SENT,
};
struct ath10k_vif { struct ath10k_vif {
struct list_head list; struct list_head list;
...@@ -253,7 +278,7 @@ struct ath10k_vif { ...@@ -253,7 +278,7 @@ struct ath10k_vif {
u32 dtim_period; u32 dtim_period;
struct sk_buff *beacon; struct sk_buff *beacon;
/* protected by data_lock */ /* protected by data_lock */
bool beacon_sent; enum ath10k_beacon_state beacon_state;
void *beacon_buf; void *beacon_buf;
dma_addr_t beacon_paddr; dma_addr_t beacon_paddr;
...@@ -266,10 +291,8 @@ struct ath10k_vif { ...@@ -266,10 +291,8 @@ struct ath10k_vif {
u32 aid; u32 aid;
u8 bssid[ETH_ALEN]; u8 bssid[ETH_ALEN];
struct work_struct wep_key_work;
struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1]; struct ieee80211_key_conf *wep_keys[WMI_MAX_KEY_INDEX + 1];
u8 def_wep_key_idx; s8 def_wep_key_idx;
u8 def_wep_key_newidx;
u16 tx_seq_no; u16 tx_seq_no;
...@@ -296,6 +319,7 @@ struct ath10k_vif { ...@@ -296,6 +319,7 @@ struct ath10k_vif {
bool use_cts_prot; bool use_cts_prot;
int num_legacy_stations; int num_legacy_stations;
int txpower; int txpower;
struct wmi_wmm_params_all_arg wmm_params;
}; };
struct ath10k_vif_iter { struct ath10k_vif_iter {
...@@ -326,6 +350,7 @@ struct ath10k_debug { ...@@ -326,6 +350,7 @@ struct ath10k_debug {
/* protected by conf_mutex */ /* protected by conf_mutex */
u32 fw_dbglog_mask; u32 fw_dbglog_mask;
u32 fw_dbglog_level;
u32 pktlog_filter; u32 pktlog_filter;
u32 reg_addr; u32 reg_addr;
u32 nf_cal_period; u32 nf_cal_period;
...@@ -452,6 +477,7 @@ struct ath10k { ...@@ -452,6 +477,7 @@ struct ath10k {
struct device *dev; struct device *dev;
u8 mac_addr[ETH_ALEN]; u8 mac_addr[ETH_ALEN];
enum ath10k_hw_rev hw_rev;
u32 chip_id; u32 chip_id;
u32 target_version; u32 target_version;
u8 fw_version_major; u8 fw_version_major;
...@@ -467,9 +493,6 @@ struct ath10k { ...@@ -467,9 +493,6 @@ struct ath10k {
DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT); DECLARE_BITMAP(fw_features, ATH10K_FW_FEATURE_COUNT);
struct targetdef *targetdef;
struct hostdef *hostdef;
bool p2p; bool p2p;
struct { struct {
...@@ -479,6 +502,7 @@ struct ath10k { ...@@ -479,6 +502,7 @@ struct ath10k {
struct completion target_suspend; struct completion target_suspend;
const struct ath10k_hw_regs *regs;
struct ath10k_bmi bmi; struct ath10k_bmi bmi;
struct ath10k_wmi wmi; struct ath10k_wmi wmi;
struct ath10k_htc htc; struct ath10k_htc htc;
...@@ -559,7 +583,6 @@ struct ath10k { ...@@ -559,7 +583,6 @@ struct ath10k {
u8 cfg_tx_chainmask; u8 cfg_tx_chainmask;
u8 cfg_rx_chainmask; u8 cfg_rx_chainmask;
struct wmi_pdev_set_wmm_params_arg wmm_params;
struct completion install_key_done; struct completion install_key_done;
struct completion vdev_setup_done; struct completion vdev_setup_done;
...@@ -643,6 +666,7 @@ struct ath10k { ...@@ -643,6 +666,7 @@ struct ath10k {
struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev, struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
enum ath10k_bus bus, enum ath10k_bus bus,
enum ath10k_hw_rev hw_rev,
const struct ath10k_hif_ops *hif_ops); const struct ath10k_hif_ops *hif_ops);
void ath10k_core_destroy(struct ath10k *ar); void ath10k_core_destroy(struct ath10k *ar);
......
...@@ -371,7 +371,7 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar) ...@@ -371,7 +371,7 @@ static int ath10k_debug_fw_stats_request(struct ath10k *ar)
ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete, ret = wait_for_completion_timeout(&ar->debug.fw_stats_complete,
1*HZ); 1*HZ);
if (ret <= 0) if (ret == 0)
return -ETIMEDOUT; return -ETIMEDOUT;
spin_lock_bh(&ar->data_lock); spin_lock_bh(&ar->data_lock);
...@@ -1318,10 +1318,10 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file, ...@@ -1318,10 +1318,10 @@ static ssize_t ath10k_read_fw_dbglog(struct file *file,
{ {
struct ath10k *ar = file->private_data; struct ath10k *ar = file->private_data;
unsigned int len; unsigned int len;
char buf[32]; char buf[64];
len = scnprintf(buf, sizeof(buf), "0x%08x\n", len = scnprintf(buf, sizeof(buf), "0x%08x %u\n",
ar->debug.fw_dbglog_mask); ar->debug.fw_dbglog_mask, ar->debug.fw_dbglog_level);
return simple_read_from_buffer(user_buf, count, ppos, buf, len); return simple_read_from_buffer(user_buf, count, ppos, buf, len);
} }
...@@ -1331,19 +1331,32 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file, ...@@ -1331,19 +1331,32 @@ static ssize_t ath10k_write_fw_dbglog(struct file *file,
size_t count, loff_t *ppos) size_t count, loff_t *ppos)
{ {
struct ath10k *ar = file->private_data; struct ath10k *ar = file->private_data;
unsigned long mask;
int ret; int ret;
char buf[64];
unsigned int log_level, mask;
ret = kstrtoul_from_user(user_buf, count, 0, &mask); simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
if (ret)
return ret; /* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = 0;
ret = sscanf(buf, "%x %u", &mask, &log_level);
if (!ret)
return -EINVAL;
if (ret == 1)
/* default if user did not specify */
log_level = ATH10K_DBGLOG_LEVEL_WARN;
mutex_lock(&ar->conf_mutex); mutex_lock(&ar->conf_mutex);
ar->debug.fw_dbglog_mask = mask; ar->debug.fw_dbglog_mask = mask;
ar->debug.fw_dbglog_level = log_level;
if (ar->state == ATH10K_STATE_ON) { if (ar->state == ATH10K_STATE_ON) {
ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
ar->debug.fw_dbglog_level);
if (ret) { if (ret) {
ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n", ath10k_warn(ar, "dbglog cfg failed from debugfs: %d\n",
ret); ret);
...@@ -1685,7 +1698,8 @@ int ath10k_debug_start(struct ath10k *ar) ...@@ -1685,7 +1698,8 @@ int ath10k_debug_start(struct ath10k *ar)
ret); ret);
if (ar->debug.fw_dbglog_mask) { if (ar->debug.fw_dbglog_mask) {
ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask); ret = ath10k_wmi_dbglog_cfg(ar, ar->debug.fw_dbglog_mask,
ATH10K_DBGLOG_LEVEL_WARN);
if (ret) if (ret)
/* not serious */ /* not serious */
ath10k_warn(ar, "failed to enable dbglog during start: %d", ath10k_warn(ar, "failed to enable dbglog during start: %d",
......
...@@ -48,6 +48,12 @@ enum ath10k_pktlog_filter { ...@@ -48,6 +48,12 @@ enum ath10k_pktlog_filter {
ATH10K_PKTLOG_ANY = 0x00000001f, ATH10K_PKTLOG_ANY = 0x00000001f,
}; };
enum ath10k_dbg_aggr_mode {
ATH10K_DBG_AGGR_MODE_AUTO,
ATH10K_DBG_AGGR_MODE_MANUAL,
ATH10K_DBG_AGGR_MODE_MAX,
};
extern unsigned int ath10k_debug_mask; extern unsigned int ath10k_debug_mask;
__printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...); __printf(2, 3) void ath10k_info(struct ath10k *ar, const char *fmt, ...);
...@@ -77,7 +83,6 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw, ...@@ -77,7 +83,6 @@ int ath10k_debug_get_et_sset_count(struct ieee80211_hw *hw,
void ath10k_debug_get_et_stats(struct ieee80211_hw *hw, void ath10k_debug_get_et_stats(struct ieee80211_hw *hw,
struct ieee80211_vif *vif, struct ieee80211_vif *vif,
struct ethtool_stats *stats, u64 *data); struct ethtool_stats *stats, u64 *data);
#else #else
static inline int ath10k_debug_start(struct ath10k *ar) static inline int ath10k_debug_start(struct ath10k *ar)
{ {
...@@ -129,6 +134,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar) ...@@ -129,6 +134,10 @@ ath10k_debug_get_new_fw_crash_data(struct ath10k *ar)
#define ath10k_debug_get_et_stats NULL #define ath10k_debug_get_et_stats NULL
#endif /* CONFIG_ATH10K_DEBUGFS */ #endif /* CONFIG_ATH10K_DEBUGFS */
#ifdef CONFIG_MAC80211_DEBUGFS
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir);
#endif /* CONFIG_MAC80211_DEBUGFS */
#ifdef CONFIG_ATH10K_DEBUG #ifdef CONFIG_ATH10K_DEBUG
__printf(3, 4) void ath10k_dbg(struct ath10k *ar, __printf(3, 4) void ath10k_dbg(struct ath10k *ar,
......
/*
* Copyright (c) 2014 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "wmi-ops.h"
#include "debug.h"
static ssize_t ath10k_dbg_sta_read_aggr_mode(struct file *file,
char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
char buf[32];
int len = 0;
mutex_lock(&ar->conf_mutex);
len = scnprintf(buf, sizeof(buf) - len, "aggregation mode: %s\n",
(arsta->aggr_mode == ATH10K_DBG_AGGR_MODE_AUTO) ?
"auto" : "manual");
mutex_unlock(&ar->conf_mutex);
return simple_read_from_buffer(user_buf, count, ppos, buf, len);
}
static ssize_t ath10k_dbg_sta_write_aggr_mode(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 aggr_mode;
int ret;
if (kstrtouint_from_user(user_buf, count, 0, &aggr_mode))
return -EINVAL;
if (aggr_mode >= ATH10K_DBG_AGGR_MODE_MAX)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(aggr_mode == arsta->aggr_mode)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_clear_resp(ar, arsta->arvif->vdev_id, sta->addr);
if (ret) {
ath10k_warn(ar, "failed to clear addba session ret: %d\n", ret);
goto out;
}
arsta->aggr_mode = aggr_mode;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_aggr_mode = {
.read = ath10k_dbg_sta_read_aggr_mode,
.write = ath10k_dbg_sta_write_aggr_mode,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_addba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, buf_size;
int ret;
char buf[64];
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = '\0';
ret = sscanf(buf, "%u %u", &tid, &buf_size);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, buf_size);
if (ret) {
ath10k_warn(ar, "failed to send addba request: vdev_id %u peer %pM tid %u buf_size %u\n",
arsta->arvif->vdev_id, sta->addr, tid, buf_size);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba = {
.write = ath10k_dbg_sta_write_addba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_addba_resp(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, status;
int ret;
char buf[64];
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = '\0';
ret = sscanf(buf, "%u %u", &tid, &status);
if (ret != 2)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_addba_set_resp(ar, arsta->arvif->vdev_id, sta->addr,
tid, status);
if (ret) {
ath10k_warn(ar, "failed to send addba response: vdev_id %u peer %pM tid %u status%u\n",
arsta->arvif->vdev_id, sta->addr, tid, status);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_addba_resp = {
.write = ath10k_dbg_sta_write_addba_resp,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
static ssize_t ath10k_dbg_sta_write_delba(struct file *file,
const char __user *user_buf,
size_t count, loff_t *ppos)
{
struct ieee80211_sta *sta = file->private_data;
struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
struct ath10k *ar = arsta->arvif->ar;
u32 tid, initiator, reason;
int ret;
char buf[64];
simple_write_to_buffer(buf, sizeof(buf) - 1, ppos, user_buf, count);
/* make sure that buf is null terminated */
buf[sizeof(buf) - 1] = '\0';
ret = sscanf(buf, "%u %u %u", &tid, &initiator, &reason);
if (ret != 3)
return -EINVAL;
/* Valid TID values are 0 through 15 */
if (tid > HTT_DATA_TX_EXT_TID_MGMT - 2)
return -EINVAL;
mutex_lock(&ar->conf_mutex);
if ((ar->state != ATH10K_STATE_ON) ||
(arsta->aggr_mode != ATH10K_DBG_AGGR_MODE_MANUAL)) {
ret = count;
goto out;
}
ret = ath10k_wmi_delba_send(ar, arsta->arvif->vdev_id, sta->addr,
tid, initiator, reason);
if (ret) {
ath10k_warn(ar, "failed to send delba: vdev_id %u peer %pM tid %u initiator %u reason %u\n",
arsta->arvif->vdev_id, sta->addr, tid, initiator,
reason);
}
ret = count;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
static const struct file_operations fops_delba = {
.write = ath10k_dbg_sta_write_delba,
.open = simple_open,
.owner = THIS_MODULE,
.llseek = default_llseek,
};
void ath10k_sta_add_debugfs(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
struct ieee80211_sta *sta, struct dentry *dir)
{
debugfs_create_file("aggr_mode", S_IRUGO | S_IWUSR, dir, sta,
&fops_aggr_mode);
debugfs_create_file("addba", S_IWUSR, dir, sta, &fops_addba);
debugfs_create_file("addba_resp", S_IWUSR, dir, sta, &fops_addba_resp);
debugfs_create_file("delba", S_IWUSR, dir, sta, &fops_delba);
}
...@@ -703,11 +703,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc, ...@@ -703,11 +703,9 @@ int ath10k_htc_connect_service(struct ath10k_htc *htc,
/* wait for response */ /* wait for response */
status = wait_for_completion_timeout(&htc->ctl_resp, status = wait_for_completion_timeout(&htc->ctl_resp,
ATH10K_HTC_CONN_SVC_TIMEOUT_HZ); ATH10K_HTC_CONN_SVC_TIMEOUT_HZ);
if (status <= 0) { if (status == 0) {
if (status == 0)
status = -ETIMEDOUT;
ath10k_err(ar, "Service connect timeout: %d\n", status); ath10k_err(ar, "Service connect timeout: %d\n", status);
return status; return -ETIMEDOUT;
} }
/* we controlled the buffer creation, it's aligned */ /* we controlled the buffer creation, it's aligned */
......
...@@ -53,7 +53,6 @@ int ath10k_htt_init(struct ath10k *ar) ...@@ -53,7 +53,6 @@ int ath10k_htt_init(struct ath10k *ar)
struct ath10k_htt *htt = &ar->htt; struct ath10k_htt *htt = &ar->htt;
htt->ar = ar; htt->ar = ar;
htt->max_throughput_mbps = 800;
/* /*
* Prefetch enough data to satisfy target * Prefetch enough data to satisfy target
...@@ -102,7 +101,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt) ...@@ -102,7 +101,7 @@ int ath10k_htt_setup(struct ath10k_htt *htt)
status = wait_for_completion_timeout(&htt->target_version_received, status = wait_for_completion_timeout(&htt->target_version_received,
HTT_TARGET_VERSION_TIMEOUT_HZ); HTT_TARGET_VERSION_TIMEOUT_HZ);
if (status <= 0) { if (status == 0) {
ath10k_warn(ar, "htt version request timed out\n"); ath10k_warn(ar, "htt version request timed out\n");
return -ETIMEDOUT; return -ETIMEDOUT;
} }
......
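Both hunks above fix the same misuse: wait_for_completion_timeout() returns an unsigned long, 0 on timeout and otherwise the remaining jiffies, so a "<= 0" check can never catch a negative error code and only obscures the intent. A minimal sketch of the corrected idiom (the function and completion names here are illustrative only):

static int example_wait_for_response(struct completion *resp)
{
	unsigned long time_left;

	/* returns 0 on timeout, otherwise the number of jiffies left */
	time_left = wait_for_completion_timeout(resp, msecs_to_jiffies(1000));
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}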
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/bug.h> #include <linux/bug.h>
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/dmapool.h> #include <linux/dmapool.h>
#include <linux/hashtable.h>
#include <net/mac80211.h> #include <net/mac80211.h>
#include "htc.h" #include "htc.h"
...@@ -286,7 +287,19 @@ enum htt_t2h_msg_type { ...@@ -286,7 +287,19 @@ enum htt_t2h_msg_type {
HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc, HTT_T2H_MSG_TYPE_RC_UPDATE_IND = 0xc,
HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd, HTT_T2H_MSG_TYPE_TX_INSPECT_IND = 0xd,
HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe, HTT_T2H_MSG_TYPE_MGMT_TX_COMPLETION = 0xe,
HTT_T2H_MSG_TYPE_TX_CREDIT_UPDATE_IND = 0xf,
HTT_T2H_MSG_TYPE_RX_PN_IND = 0x10,
HTT_T2H_MSG_TYPE_RX_OFFLOAD_DELIVER_IND = 0x11,
HTT_T2H_MSG_TYPE_RX_IN_ORD_PADDR_IND = 0x12,
/* 0x13 reservd */
HTT_T2H_MSG_TYPE_WDI_IPA_OP_RESPONSE = 0x14,
/* FIXME: Do not depend on this event id. Numbering of this event id is
* broken across different firmware revisions and HTT version fails to
* indicate this.
*/
HTT_T2H_MSG_TYPE_TEST, HTT_T2H_MSG_TYPE_TEST,
/* keep this last */ /* keep this last */
HTT_T2H_NUM_MSGS HTT_T2H_NUM_MSGS
}; };
...@@ -655,6 +668,53 @@ struct htt_rx_fragment_indication { ...@@ -655,6 +668,53 @@ struct htt_rx_fragment_indication {
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_MASK 0x00000FC0
#define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6 #define HTT_RX_FRAG_IND_INFO1_FLUSH_SEQ_NUM_END_LSB 6
struct htt_rx_pn_ind {
__le16 peer_id;
u8 tid;
u8 seqno_start;
u8 seqno_end;
u8 pn_ie_count;
u8 reserved;
u8 pn_ies[0];
} __packed;
struct htt_rx_offload_msdu {
__le16 msdu_len;
__le16 peer_id;
u8 vdev_id;
u8 tid;
u8 fw_desc;
u8 payload[0];
} __packed;
struct htt_rx_offload_ind {
u8 reserved;
__le16 msdu_count;
} __packed;
struct htt_rx_in_ord_msdu_desc {
__le32 msdu_paddr;
__le16 msdu_len;
u8 fw_desc;
u8 reserved;
} __packed;
struct htt_rx_in_ord_ind {
u8 info;
__le16 peer_id;
u8 vdev_id;
u8 reserved;
__le16 msdu_count;
struct htt_rx_in_ord_msdu_desc msdu_descs[0];
} __packed;
#define HTT_RX_IN_ORD_IND_INFO_TID_MASK 0x0000001f
#define HTT_RX_IN_ORD_IND_INFO_TID_LSB 0
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_MASK 0x00000020
#define HTT_RX_IN_ORD_IND_INFO_OFFLOAD_LSB 5
#define HTT_RX_IN_ORD_IND_INFO_FRAG_MASK 0x00000040
#define HTT_RX_IN_ORD_IND_INFO_FRAG_LSB 6
/* /*
* target -> host test message definition * target -> host test message definition
* *
...@@ -1150,6 +1210,9 @@ struct htt_resp { ...@@ -1150,6 +1210,9 @@ struct htt_resp {
struct htt_rx_test rx_test; struct htt_rx_test rx_test;
struct htt_pktlog_msg pktlog_msg; struct htt_pktlog_msg pktlog_msg;
struct htt_stats_conf stats_conf; struct htt_stats_conf stats_conf;
struct htt_rx_pn_ind rx_pn_ind;
struct htt_rx_offload_ind rx_offload_ind;
struct htt_rx_in_ord_ind rx_in_ord_ind;
}; };
} __packed; } __packed;
...@@ -1182,7 +1245,6 @@ struct ath10k_htt { ...@@ -1182,7 +1245,6 @@ struct ath10k_htt {
struct ath10k *ar; struct ath10k *ar;
enum ath10k_htc_ep_id eid; enum ath10k_htc_ep_id eid;
int max_throughput_mbps;
u8 target_version_major; u8 target_version_major;
u8 target_version_minor; u8 target_version_minor;
struct completion target_version_received; struct completion target_version_received;
...@@ -1198,6 +1260,20 @@ struct ath10k_htt { ...@@ -1198,6 +1260,20 @@ struct ath10k_htt {
* filled. * filled.
*/ */
struct sk_buff **netbufs_ring; struct sk_buff **netbufs_ring;
/* This is used only with firmware supporting IN_ORD_IND.
*
* With Full Rx Reorder the HTT Rx Ring is more of a temporary
* buffer ring from which buffer addresses are copied by the
* firmware to MAC Rx ring. Firmware then delivers IN_ORD_IND
* pointing to specific (re-ordered) buffers.
*
* FIXME: With kernel generic hashing functions there's a lot
* of hash collisions for sk_buffs.
*/
bool in_ord_rx;
DECLARE_HASHTABLE(skb_table, 4);
/* /*
* Ring of buffer addresses - * Ring of buffer addresses -
* This ring holds the "physical" device address of the * This ring holds the "physical" device address of the
...@@ -1252,12 +1328,11 @@ struct ath10k_htt { ...@@ -1252,12 +1328,11 @@ struct ath10k_htt {
unsigned int prefetch_len; unsigned int prefetch_len;
/* Protects access to %pending_tx, %used_msdu_ids */ /* Protects access to pending_tx, num_pending_tx */
spinlock_t tx_lock; spinlock_t tx_lock;
int max_num_pending_tx; int max_num_pending_tx;
int num_pending_tx; int num_pending_tx;
struct sk_buff **pending_tx; struct idr pending_tx;
unsigned long *used_msdu_ids; /* bitmap */
wait_queue_head_t empty_tx_wq; wait_queue_head_t empty_tx_wq;
struct dma_pool *tx_pool; struct dma_pool *tx_pool;
...@@ -1271,6 +1346,7 @@ struct ath10k_htt { ...@@ -1271,6 +1346,7 @@ struct ath10k_htt {
struct tasklet_struct txrx_compl_task; struct tasklet_struct txrx_compl_task;
struct sk_buff_head tx_compl_q; struct sk_buff_head tx_compl_q;
struct sk_buff_head rx_compl_q; struct sk_buff_head rx_compl_q;
struct sk_buff_head rx_in_ord_compl_q;
/* rx_status template */ /* rx_status template */
struct ieee80211_rx_status rx_status; struct ieee80211_rx_status rx_status;
...@@ -1334,6 +1410,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt); ...@@ -1334,6 +1410,7 @@ int ath10k_htt_tx_alloc(struct ath10k_htt *htt);
void ath10k_htt_tx_free(struct ath10k_htt *htt); void ath10k_htt_tx_free(struct ath10k_htt *htt);
int ath10k_htt_rx_alloc(struct ath10k_htt *htt); int ath10k_htt_rx_alloc(struct ath10k_htt *htt);
int ath10k_htt_rx_ring_refill(struct ath10k *ar);
void ath10k_htt_rx_free(struct ath10k_htt *htt); void ath10k_htt_rx_free(struct ath10k_htt *htt);
void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb); void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb);
...@@ -1346,7 +1423,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt, ...@@ -1346,7 +1423,7 @@ int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
u8 max_subfrms_amsdu); u8 max_subfrms_amsdu);
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt); void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt);
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt); int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb);
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id); void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id);
int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *); int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *);
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *); int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *);
......
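With RX_IN_ORD_PADDR_IND the target reports completed buffers by their DMA address, so the host has to map that physical address back to the sk_buff it posted; that is what the skb_table hashtable above and the ath10k_skb_rxcb added in core.h are for. A rough sketch of such an insert/lookup using the generic <linux/hashtable.h> helpers, assuming skb_table sits in the rx_ring sub-struct next to in_ord_rx; this is illustrative, not the driver's actual htt_rx.c code:

/* Sketch: track and resolve posted RX buffers by DMA address. */
static void example_rx_hash_add(struct ath10k_htt *htt, struct sk_buff *skb,
				dma_addr_t paddr)
{
	struct ath10k_skb_rxcb *rxcb = ATH10K_SKB_RXCB(skb);

	rxcb->paddr = paddr;
	hash_add(htt->rx_ring.skb_table, &rxcb->hlist, paddr);
}

static struct sk_buff *example_rx_hash_find(struct ath10k_htt *htt, u32 paddr)
{
	struct ath10k_skb_rxcb *rxcb;

	hash_for_each_possible(htt->rx_ring.skb_table, rxcb, hlist, paddr) {
		if (rxcb->paddr == paddr)
			return ATH10K_RXCB_SKB(rxcb);
	}

	return NULL;
}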
This diff is collapsed.
...@@ -56,21 +56,18 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt) ...@@ -56,21 +56,18 @@ static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
return ret; return ret;
} }
int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt) int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{ {
struct ath10k *ar = htt->ar; struct ath10k *ar = htt->ar;
int msdu_id; int ret;
lockdep_assert_held(&htt->tx_lock); lockdep_assert_held(&htt->tx_lock);
msdu_id = find_first_zero_bit(htt->used_msdu_ids, ret = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
htt->max_num_pending_tx);
if (msdu_id == htt->max_num_pending_tx) ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);
return -ENOBUFS;
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", msdu_id); return ret;
__set_bit(msdu_id, htt->used_msdu_ids);
return msdu_id;
} }
void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
...@@ -79,74 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id) ...@@ -79,74 +76,53 @@ void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
lockdep_assert_held(&htt->tx_lock); lockdep_assert_held(&htt->tx_lock);
if (!test_bit(msdu_id, htt->used_msdu_ids))
ath10k_warn(ar, "trying to free unallocated msdu_id %d\n",
msdu_id);
ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id); ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);
__clear_bit(msdu_id, htt->used_msdu_ids);
idr_remove(&htt->pending_tx, msdu_id);
} }
int ath10k_htt_tx_alloc(struct ath10k_htt *htt) int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{ {
struct ath10k *ar = htt->ar; struct ath10k *ar = htt->ar;
spin_lock_init(&htt->tx_lock);
ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n", ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
htt->max_num_pending_tx); htt->max_num_pending_tx);
htt->pending_tx = kzalloc(sizeof(*htt->pending_tx) * spin_lock_init(&htt->tx_lock);
htt->max_num_pending_tx, GFP_KERNEL); idr_init(&htt->pending_tx);
if (!htt->pending_tx)
return -ENOMEM;
htt->used_msdu_ids = kzalloc(sizeof(unsigned long) *
BITS_TO_LONGS(htt->max_num_pending_tx),
GFP_KERNEL);
if (!htt->used_msdu_ids) {
kfree(htt->pending_tx);
return -ENOMEM;
}
htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev, htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
sizeof(struct ath10k_htt_txbuf), 4, 0); sizeof(struct ath10k_htt_txbuf), 4, 0);
if (!htt->tx_pool) { if (!htt->tx_pool) {
kfree(htt->used_msdu_ids); idr_destroy(&htt->pending_tx);
kfree(htt->pending_tx);
return -ENOMEM; return -ENOMEM;
} }
return 0; return 0;
} }
static void ath10k_htt_tx_free_pending(struct ath10k_htt *htt) static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{ {
struct ath10k *ar = htt->ar; struct ath10k *ar = ctx;
struct ath10k_htt *htt = &ar->htt;
struct htt_tx_done tx_done = {0}; struct htt_tx_done tx_done = {0};
int msdu_id;
spin_lock_bh(&htt->tx_lock);
for (msdu_id = 0; msdu_id < htt->max_num_pending_tx; msdu_id++) {
if (!test_bit(msdu_id, htt->used_msdu_ids))
continue;
ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %hu\n", msdu_id);
msdu_id);
tx_done.discard = 1; tx_done.discard = 1;
tx_done.msdu_id = msdu_id; tx_done.msdu_id = msdu_id;
ath10k_txrx_tx_unref(htt, &tx_done); spin_lock_bh(&htt->tx_lock);
} ath10k_txrx_tx_unref(htt, &tx_done);
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
return 0;
} }
void ath10k_htt_tx_free(struct ath10k_htt *htt) void ath10k_htt_tx_free(struct ath10k_htt *htt)
{ {
ath10k_htt_tx_free_pending(htt); idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
kfree(htt->pending_tx); idr_destroy(&htt->pending_tx);
kfree(htt->used_msdu_ids);
dma_pool_destroy(htt->tx_pool); dma_pool_destroy(htt->tx_pool);
} }
...@@ -378,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -378,13 +354,12 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
len += sizeof(cmd->mgmt_tx); len += sizeof(cmd->mgmt_tx);
spin_lock_bh(&htt->tx_lock); spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) { if (res < 0) {
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec; goto err_tx_dec;
} }
msdu_id = res; msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
txdesc = ath10k_htc_alloc_skb(ar, len); txdesc = ath10k_htc_alloc_skb(ar, len);
...@@ -423,7 +398,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -423,7 +398,6 @@ int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
dev_kfree_skb_any(txdesc); dev_kfree_skb_any(txdesc);
err_free_msdu_id: err_free_msdu_id:
spin_lock_bh(&htt->tx_lock); spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id); ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
err_tx_dec: err_tx_dec:
...@@ -455,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -455,13 +429,12 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
goto err; goto err;
spin_lock_bh(&htt->tx_lock); spin_lock_bh(&htt->tx_lock);
res = ath10k_htt_tx_alloc_msdu_id(htt); res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
if (res < 0) { if (res < 0) {
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
goto err_tx_dec; goto err_tx_dec;
} }
msdu_id = res; msdu_id = res;
htt->pending_tx[msdu_id] = msdu;
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
prefetch_len = min(htt->prefetch_len, msdu->len); prefetch_len = min(htt->prefetch_len, msdu->len);
...@@ -475,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -475,10 +448,18 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC, skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
&paddr); &paddr);
if (!skb_cb->htt.txbuf) if (!skb_cb->htt.txbuf) {
res = -ENOMEM;
goto err_free_msdu_id; goto err_free_msdu_id;
}
skb_cb->htt.txbuf_paddr = paddr; skb_cb->htt.txbuf_paddr = paddr;
if ((ieee80211_is_action(hdr->frame_control) ||
ieee80211_is_deauth(hdr->frame_control) ||
ieee80211_is_disassoc(hdr->frame_control)) &&
ieee80211_has_protected(hdr->frame_control))
skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len, skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
DMA_TO_DEVICE); DMA_TO_DEVICE);
res = dma_mapping_error(dev, skb_cb->paddr); res = dma_mapping_error(dev, skb_cb->paddr);
...@@ -534,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -534,8 +515,10 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID); flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID); flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD; if (msdu->ip_summed == CHECKSUM_PARTIAL) {
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD; flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
}
/* Prevent firmware from sending up tx inspection requests. There's /* Prevent firmware from sending up tx inspection requests. There's
* nothing ath10k can do with frames requested for inspection so force * nothing ath10k can do with frames requested for inspection so force
...@@ -593,7 +576,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu) ...@@ -593,7 +576,6 @@ int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
skb_cb->htt.txbuf_paddr); skb_cb->htt.txbuf_paddr);
err_free_msdu_id: err_free_msdu_id:
spin_lock_bh(&htt->tx_lock); spin_lock_bh(&htt->tx_lock);
htt->pending_tx[msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, msdu_id); ath10k_htt_tx_free_msdu_id(htt, msdu_id);
spin_unlock_bh(&htt->tx_lock); spin_unlock_bh(&htt->tx_lock);
err_tx_dec: err_tx_dec:
......
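The hunks above replace the pending_tx pointer array and used_msdu_ids bitmap with a single idr: idr_alloc() hands out the MSDU id and stores the skb pointer under it in one step, and the reverse lookup on tx completion becomes an idr_find(). A minimal sketch of that lookup, assuming tx_lock is held as in the surrounding code (illustrative, not a hunk from this patch):

/* Sketch only: resolve an MSDU id reported by firmware back to its skb. */
static struct sk_buff *example_find_pending_msdu(struct ath10k_htt *htt,
						 u16 msdu_id)
{
	lockdep_assert_held(&htt->tx_lock);

	/* idr_find() returns the pointer stored by idr_alloc(), or NULL */
	return idr_find(&htt->pending_tx, msdu_id);
}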
/* /*
* Copyright (c) 2014 Qualcomm Atheros, Inc. * Copyright (c) 2014-2015 Qualcomm Atheros, Inc.
* *
* Permission to use, copy, modify, and/or distribute this software for any * Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above * purpose with or without fee is hereby granted, provided that the above
...@@ -14,11 +14,45 @@ ...@@ -14,11 +14,45 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/ */
#ifndef __WIL_PLATFORM__MSM_H__ #include <linux/types.h>
#define __WIL_PLATFORM_MSM_H__ #include "hw.h"
#include "wil_platform.h" const struct ath10k_hw_regs qca988x_regs = {
.rtc_state_cold_reset_mask = 0x00000400,
.rtc_soc_base_address = 0x00004000,
.rtc_wmac_base_address = 0x00005000,
.soc_core_base_address = 0x00009000,
.ce_wrapper_base_address = 0x00057000,
.ce0_base_address = 0x00057400,
.ce1_base_address = 0x00057800,
.ce2_base_address = 0x00057c00,
.ce3_base_address = 0x00058000,
.ce4_base_address = 0x00058400,
.ce5_base_address = 0x00058800,
.ce6_base_address = 0x00058c00,
.ce7_base_address = 0x00059000,
.soc_reset_control_si0_rst_mask = 0x00000001,
.soc_reset_control_ce_rst_mask = 0x00040000,
.soc_chip_id_address = 0x00ec,
.scratch_3_address = 0x0030,
};
void *wil_platform_msm_init(struct device *dev, struct wil_platform_ops *ops); const struct ath10k_hw_regs qca6174_regs = {
.rtc_state_cold_reset_mask = 0x00002000,
#endif /* __WIL_PLATFORM__MSM_H__ */ .rtc_soc_base_address = 0x00000800,
.rtc_wmac_base_address = 0x00001000,
.soc_core_base_address = 0x0003a000,
.ce_wrapper_base_address = 0x00034000,
.ce0_base_address = 0x00034400,
.ce1_base_address = 0x00034800,
.ce2_base_address = 0x00034c00,
.ce3_base_address = 0x00035000,
.ce4_base_address = 0x00035400,
.ce5_base_address = 0x00035800,
.ce6_base_address = 0x00035c00,
.ce7_base_address = 0x00036000,
.soc_reset_control_si0_rst_mask = 0x00000000,
.soc_reset_control_ce_rst_mask = 0x00000001,
.soc_chip_id_address = 0x000f0,
.scratch_3_address = 0x0028,
};
...@@ -34,6 +34,44 @@ ...@@ -34,6 +34,44 @@
#define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin" #define QCA988X_HW_2_0_BOARD_DATA_FILE "board.bin"
#define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234 #define QCA988X_HW_2_0_PATCH_LOAD_ADDR 0x1234
/* QCA6174 target BMI version signatures */
#define QCA6174_HW_1_0_VERSION 0x05000000
#define QCA6174_HW_1_1_VERSION 0x05000001
#define QCA6174_HW_1_3_VERSION 0x05000003
#define QCA6174_HW_2_1_VERSION 0x05010000
#define QCA6174_HW_3_0_VERSION 0x05020000
#define QCA6174_HW_3_2_VERSION 0x05030000
enum qca6174_pci_rev {
QCA6174_PCI_REV_1_1 = 0x11,
QCA6174_PCI_REV_1_3 = 0x13,
QCA6174_PCI_REV_2_0 = 0x20,
QCA6174_PCI_REV_3_0 = 0x30,
};
enum qca6174_chip_id_rev {
QCA6174_HW_1_0_CHIP_ID_REV = 0,
QCA6174_HW_1_1_CHIP_ID_REV = 1,
QCA6174_HW_1_3_CHIP_ID_REV = 2,
QCA6174_HW_2_1_CHIP_ID_REV = 4,
QCA6174_HW_2_2_CHIP_ID_REV = 5,
QCA6174_HW_3_0_CHIP_ID_REV = 8,
QCA6174_HW_3_1_CHIP_ID_REV = 9,
QCA6174_HW_3_2_CHIP_ID_REV = 10,
};
#define QCA6174_HW_2_1_FW_DIR "ath10k/QCA6174/hw2.1"
#define QCA6174_HW_2_1_FW_FILE "firmware.bin"
#define QCA6174_HW_2_1_OTP_FILE "otp.bin"
#define QCA6174_HW_2_1_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_2_1_PATCH_LOAD_ADDR 0x1234
#define QCA6174_HW_3_0_FW_DIR "ath10k/QCA6174/hw3.0"
#define QCA6174_HW_3_0_FW_FILE "firmware.bin"
#define QCA6174_HW_3_0_OTP_FILE "otp.bin"
#define QCA6174_HW_3_0_BOARD_DATA_FILE "board.bin"
#define QCA6174_HW_3_0_PATCH_LOAD_ADDR 0x1234
#define ATH10K_FW_API2_FILE "firmware-2.bin" #define ATH10K_FW_API2_FILE "firmware-2.bin"
#define ATH10K_FW_API3_FILE "firmware-3.bin" #define ATH10K_FW_API3_FILE "firmware-3.bin"
...@@ -81,6 +119,37 @@ enum ath10k_fw_wmi_op_version { ...@@ -81,6 +119,37 @@ enum ath10k_fw_wmi_op_version {
ATH10K_FW_WMI_OP_VERSION_MAX, ATH10K_FW_WMI_OP_VERSION_MAX,
}; };
enum ath10k_hw_rev {
ATH10K_HW_QCA988X,
ATH10K_HW_QCA6174,
};
struct ath10k_hw_regs {
u32 rtc_state_cold_reset_mask;
u32 rtc_soc_base_address;
u32 rtc_wmac_base_address;
u32 soc_core_base_address;
u32 ce_wrapper_base_address;
u32 ce0_base_address;
u32 ce1_base_address;
u32 ce2_base_address;
u32 ce3_base_address;
u32 ce4_base_address;
u32 ce5_base_address;
u32 ce6_base_address;
u32 ce7_base_address;
u32 soc_reset_control_si0_rst_mask;
u32 soc_reset_control_ce_rst_mask;
u32 soc_chip_id_address;
u32 scratch_3_address;
};
extern const struct ath10k_hw_regs qca988x_regs;
extern const struct ath10k_hw_regs qca6174_regs;
#define QCA_REV_988X(ar) ((ar)->hw_rev == ATH10K_HW_QCA988X)
#define QCA_REV_6174(ar) ((ar)->hw_rev == ATH10K_HW_QCA6174)
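The register defines further below no longer hard-code QCA988X values; they expand to fields of ar->regs, so the same code paths work on both chips once the right table is installed. A minimal sketch of that selection follows; the helper name is invented and the hw_rev/regs fields on struct ath10k are assumed here for illustration, this is not the exact code added by this series.

static int ath10k_select_hw_regs_sketch(struct ath10k *ar)
{
	/* Pick the per-chip register table once; macros such as
	 * RTC_SOC_BASE_ADDRESS then resolve through ar->regs at runtime.
	 */
	switch (ar->hw_rev) {
	case ATH10K_HW_QCA988X:
		ar->regs = &qca988x_regs;
		break;
	case ATH10K_HW_QCA6174:
		ar->regs = &qca6174_regs;
		break;
	default:
		return -ENOTSUPP;
	}

	return 0;
}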
/* Known peculiarities:
 * - current FW doesn't support raw rx mode (last tested v599)
 * - current FW dumps upon raw tx mode (last tested v599)
...@@ -183,6 +252,9 @@ struct ath10k_pktlog_hdr { ...@@ -183,6 +252,9 @@ struct ath10k_pktlog_hdr {
#define TARGET_10X_NUM_MSDU_DESC (1024 + 400) #define TARGET_10X_NUM_MSDU_DESC (1024 + 400)
#define TARGET_10X_MAX_FRAG_ENTRIES 0 #define TARGET_10X_MAX_FRAG_ENTRIES 0
/* 10.2 parameters */
#define TARGET_10_2_DMA_BURST_SIZE 1
/* Target specific defines for WMI-TLV firmware */ /* Target specific defines for WMI-TLV firmware */
#define TARGET_TLV_NUM_VDEVS 3 #define TARGET_TLV_NUM_VDEVS 3
#define TARGET_TLV_NUM_STATIONS 32 #define TARGET_TLV_NUM_STATIONS 32
...@@ -222,7 +294,7 @@ struct ath10k_pktlog_hdr { ...@@ -222,7 +294,7 @@ struct ath10k_pktlog_hdr {
/* as of IP3.7.1 */ /* as of IP3.7.1 */
#define RTC_STATE_V_ON 3 #define RTC_STATE_V_ON 3
#define RTC_STATE_COLD_RESET_MASK 0x00000400 #define RTC_STATE_COLD_RESET_MASK ar->regs->rtc_state_cold_reset_mask
#define RTC_STATE_V_LSB 0 #define RTC_STATE_V_LSB 0
#define RTC_STATE_V_MASK 0x00000007 #define RTC_STATE_V_MASK 0x00000007
#define RTC_STATE_ADDRESS 0x0000 #define RTC_STATE_ADDRESS 0x0000
...@@ -231,12 +303,12 @@ struct ath10k_pktlog_hdr { ...@@ -231,12 +303,12 @@ struct ath10k_pktlog_hdr {
#define PCIE_SOC_WAKE_RESET 0x00000000 #define PCIE_SOC_WAKE_RESET 0x00000000
#define SOC_GLOBAL_RESET_ADDRESS 0x0008 #define SOC_GLOBAL_RESET_ADDRESS 0x0008
#define RTC_SOC_BASE_ADDRESS 0x00004000 #define RTC_SOC_BASE_ADDRESS ar->regs->rtc_soc_base_address
#define RTC_WMAC_BASE_ADDRESS 0x00005000 #define RTC_WMAC_BASE_ADDRESS ar->regs->rtc_wmac_base_address
#define MAC_COEX_BASE_ADDRESS 0x00006000 #define MAC_COEX_BASE_ADDRESS 0x00006000
#define BT_COEX_BASE_ADDRESS 0x00007000 #define BT_COEX_BASE_ADDRESS 0x00007000
#define SOC_PCIE_BASE_ADDRESS 0x00008000 #define SOC_PCIE_BASE_ADDRESS 0x00008000
#define SOC_CORE_BASE_ADDRESS 0x00009000 #define SOC_CORE_BASE_ADDRESS ar->regs->soc_core_base_address
#define WLAN_UART_BASE_ADDRESS 0x0000c000 #define WLAN_UART_BASE_ADDRESS 0x0000c000
#define WLAN_SI_BASE_ADDRESS 0x00010000 #define WLAN_SI_BASE_ADDRESS 0x00010000
#define WLAN_GPIO_BASE_ADDRESS 0x00014000 #define WLAN_GPIO_BASE_ADDRESS 0x00014000
...@@ -245,23 +317,23 @@ struct ath10k_pktlog_hdr { ...@@ -245,23 +317,23 @@ struct ath10k_pktlog_hdr {
#define EFUSE_BASE_ADDRESS 0x00030000 #define EFUSE_BASE_ADDRESS 0x00030000
#define FPGA_REG_BASE_ADDRESS 0x00039000 #define FPGA_REG_BASE_ADDRESS 0x00039000
#define WLAN_UART2_BASE_ADDRESS 0x00054c00 #define WLAN_UART2_BASE_ADDRESS 0x00054c00
#define CE_WRAPPER_BASE_ADDRESS 0x00057000 #define CE_WRAPPER_BASE_ADDRESS ar->regs->ce_wrapper_base_address
#define CE0_BASE_ADDRESS 0x00057400 #define CE0_BASE_ADDRESS ar->regs->ce0_base_address
#define CE1_BASE_ADDRESS 0x00057800 #define CE1_BASE_ADDRESS ar->regs->ce1_base_address
#define CE2_BASE_ADDRESS 0x00057c00 #define CE2_BASE_ADDRESS ar->regs->ce2_base_address
#define CE3_BASE_ADDRESS 0x00058000 #define CE3_BASE_ADDRESS ar->regs->ce3_base_address
#define CE4_BASE_ADDRESS 0x00058400 #define CE4_BASE_ADDRESS ar->regs->ce4_base_address
#define CE5_BASE_ADDRESS 0x00058800 #define CE5_BASE_ADDRESS ar->regs->ce5_base_address
#define CE6_BASE_ADDRESS 0x00058c00 #define CE6_BASE_ADDRESS ar->regs->ce6_base_address
#define CE7_BASE_ADDRESS 0x00059000 #define CE7_BASE_ADDRESS ar->regs->ce7_base_address
#define DBI_BASE_ADDRESS 0x00060000 #define DBI_BASE_ADDRESS 0x00060000
#define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000 #define WLAN_ANALOG_INTF_PCIE_BASE_ADDRESS 0x0006c000
#define PCIE_LOCAL_BASE_ADDRESS 0x00080000 #define PCIE_LOCAL_BASE_ADDRESS 0x00080000
#define SOC_RESET_CONTROL_ADDRESS 0x00000000 #define SOC_RESET_CONTROL_ADDRESS 0x00000000
#define SOC_RESET_CONTROL_OFFSET 0x00000000 #define SOC_RESET_CONTROL_OFFSET 0x00000000
#define SOC_RESET_CONTROL_SI0_RST_MASK 0x00000001 #define SOC_RESET_CONTROL_SI0_RST_MASK ar->regs->soc_reset_control_si0_rst_mask
#define SOC_RESET_CONTROL_CE_RST_MASK 0x00040000 #define SOC_RESET_CONTROL_CE_RST_MASK ar->regs->soc_reset_control_ce_rst_mask
#define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040 #define SOC_RESET_CONTROL_CPU_WARM_RST_MASK 0x00000040
#define SOC_CPU_CLOCK_OFFSET 0x00000020 #define SOC_CPU_CLOCK_OFFSET 0x00000020
#define SOC_CPU_CLOCK_STANDARD_LSB 0 #define SOC_CPU_CLOCK_STANDARD_LSB 0
...@@ -275,7 +347,7 @@ struct ath10k_pktlog_hdr { ...@@ -275,7 +347,7 @@ struct ath10k_pktlog_hdr {
#define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050 #define SOC_LF_TIMER_CONTROL0_ADDRESS 0x00000050
#define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004 #define SOC_LF_TIMER_CONTROL0_ENABLE_MASK 0x00000004
#define SOC_CHIP_ID_ADDRESS 0x000000ec #define SOC_CHIP_ID_ADDRESS ar->regs->soc_chip_id_address
#define SOC_CHIP_ID_REV_LSB 8 #define SOC_CHIP_ID_REV_LSB 8
#define SOC_CHIP_ID_REV_MASK 0x00000f00 #define SOC_CHIP_ID_REV_MASK 0x00000f00
...@@ -331,7 +403,7 @@ struct ath10k_pktlog_hdr { ...@@ -331,7 +403,7 @@ struct ath10k_pktlog_hdr {
#define PCIE_INTR_ENABLE_ADDRESS 0x0008 #define PCIE_INTR_ENABLE_ADDRESS 0x0008
#define PCIE_INTR_CAUSE_ADDRESS 0x000c #define PCIE_INTR_CAUSE_ADDRESS 0x000c
#define PCIE_INTR_CLR_ADDRESS 0x0014 #define PCIE_INTR_CLR_ADDRESS 0x0014
#define SCRATCH_3_ADDRESS 0x0030 #define SCRATCH_3_ADDRESS ar->regs->scratch_3_address
#define CPU_INTR_ADDRESS 0x0010 #define CPU_INTR_ADDRESS 0x0010
/* Firmware indications to the Host via SCRATCH_3 register. */ /* Firmware indications to the Host via SCRATCH_3 register. */
......
...@@ -58,9 +58,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)"); ...@@ -58,9 +58,11 @@ MODULE_PARM_DESC(reset_mode, "0: auto, 1: warm only (default: 0)");
#define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3 #define ATH10K_PCI_NUM_WARM_RESET_ATTEMPTS 3
#define QCA988X_2_0_DEVICE_ID (0x003c) #define QCA988X_2_0_DEVICE_ID (0x003c)
#define QCA6174_2_1_DEVICE_ID (0x003e)
static const struct pci_device_id ath10k_pci_id_table[] = { static const struct pci_device_id ath10k_pci_id_table[] = {
{ PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */ { PCI_VDEVICE(ATHEROS, QCA988X_2_0_DEVICE_ID) }, /* PCI-E QCA988X V2 */
{ PCI_VDEVICE(ATHEROS, QCA6174_2_1_DEVICE_ID) }, /* PCI-E QCA6174 V2.1 */
{0} {0}
}; };
...@@ -70,6 +72,11 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = { ...@@ -70,6 +72,11 @@ static const struct ath10k_pci_supp_chip ath10k_pci_supp_chips[] = {
* because of that. * because of that.
*/ */
{ QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV }, { QCA988X_2_0_DEVICE_ID, QCA988X_HW_2_0_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_1_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_2_2_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_0_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_1_CHIP_ID_REV },
{ QCA6174_2_1_DEVICE_ID, QCA6174_HW_3_2_CHIP_ID_REV },
}; };
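The PCI id table only gates probing; the chip revision read back from the device is then checked against ath10k_pci_supp_chips[]. A hedged sketch of that lookup is below; the struct field names (dev_id, rev_id) are assumptions made for illustration.

static bool ath10k_pci_chip_is_supported_sketch(u32 dev_id, u32 chip_id)
{
	int i;

	/* Accept the device only if the (PCI id, chip revision) pair is known. */
	for (i = 0; i < ARRAY_SIZE(ath10k_pci_supp_chips); i++)
		if (ath10k_pci_supp_chips[i].dev_id == dev_id &&
		    ath10k_pci_supp_chips[i].rev_id == MS(chip_id, SOC_CHIP_ID_REV))
			return true;

	return false;
}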
static void ath10k_pci_buffer_cleanup(struct ath10k *ar); static void ath10k_pci_buffer_cleanup(struct ath10k *ar);
...@@ -403,7 +410,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe) ...@@ -403,7 +410,7 @@ static int __ath10k_pci_rx_post_buf(struct ath10k_pci_pipe *pipe)
return -EIO; return -EIO;
} }
ATH10K_SKB_CB(skb)->paddr = paddr; ATH10K_SKB_RXCB(skb)->paddr = paddr;
ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr); ret = __ath10k_ce_rx_post_buf(ce_pipe, skb, paddr);
if (ret) { if (ret) {
...@@ -872,7 +879,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state) ...@@ -872,7 +879,7 @@ static void ath10k_pci_ce_recv_data(struct ath10k_ce_pipe *ce_state)
&flags) == 0) { &flags) == 0) {
skb = transfer_context; skb = transfer_context;
max_nbytes = skb->len + skb_tailroom(skb); max_nbytes = skb->len + skb_tailroom(skb);
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
max_nbytes, DMA_FROM_DEVICE); max_nbytes, DMA_FROM_DEVICE);
if (unlikely(max_nbytes < nbytes)) { if (unlikely(max_nbytes < nbytes)) {
...@@ -1238,7 +1245,7 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe) ...@@ -1238,7 +1245,7 @@ static void ath10k_pci_rx_pipe_cleanup(struct ath10k_pci_pipe *pci_pipe)
ce_ring->per_transfer_context[i] = NULL; ce_ring->per_transfer_context[i] = NULL;
dma_unmap_single(ar->dev, ATH10K_SKB_CB(skb)->paddr, dma_unmap_single(ar->dev, ATH10K_SKB_RXCB(skb)->paddr,
skb->len + skb_tailroom(skb), skb->len + skb_tailroom(skb),
DMA_FROM_DEVICE); DMA_FROM_DEVICE);
dev_kfree_skb_any(skb); dev_kfree_skb_any(skb);
...@@ -1506,6 +1513,35 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar) ...@@ -1506,6 +1513,35 @@ static int ath10k_pci_wake_target_cpu(struct ath10k *ar)
return 0; return 0;
} }
static int ath10k_pci_get_num_banks(struct ath10k *ar)
{
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
switch (ar_pci->pdev->device) {
case QCA988X_2_0_DEVICE_ID:
return 1;
case QCA6174_2_1_DEVICE_ID:
switch (MS(ar->chip_id, SOC_CHIP_ID_REV)) {
case QCA6174_HW_1_0_CHIP_ID_REV:
case QCA6174_HW_1_1_CHIP_ID_REV:
return 3;
case QCA6174_HW_1_3_CHIP_ID_REV:
return 2;
case QCA6174_HW_2_1_CHIP_ID_REV:
case QCA6174_HW_2_2_CHIP_ID_REV:
return 6;
case QCA6174_HW_3_0_CHIP_ID_REV:
case QCA6174_HW_3_1_CHIP_ID_REV:
case QCA6174_HW_3_2_CHIP_ID_REV:
return 9;
}
break;
}
ath10k_warn(ar, "unknown number of banks, assuming 1\n");
return 1;
}
static int ath10k_pci_init_config(struct ath10k *ar) static int ath10k_pci_init_config(struct ath10k *ar)
{ {
u32 interconnect_targ_addr; u32 interconnect_targ_addr;
...@@ -1616,7 +1652,8 @@ static int ath10k_pci_init_config(struct ath10k *ar) ...@@ -1616,7 +1652,8 @@ static int ath10k_pci_init_config(struct ath10k *ar)
/* first bank is switched to IRAM */ /* first bank is switched to IRAM */
ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) & ealloc_value |= ((HI_EARLY_ALLOC_MAGIC << HI_EARLY_ALLOC_MAGIC_SHIFT) &
HI_EARLY_ALLOC_MAGIC_MASK); HI_EARLY_ALLOC_MAGIC_MASK);
ealloc_value |= ((1 << HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) & ealloc_value |= ((ath10k_pci_get_num_banks(ar) <<
HI_EARLY_ALLOC_IRAM_BANKS_SHIFT) &
HI_EARLY_ALLOC_IRAM_BANKS_MASK); HI_EARLY_ALLOC_IRAM_BANKS_MASK);
ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value); ret = ath10k_pci_diag_write32(ar, ealloc_targ_addr, ealloc_value);
...@@ -1812,12 +1849,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar) ...@@ -1812,12 +1849,12 @@ static int ath10k_pci_warm_reset(struct ath10k *ar)
return 0; return 0;
} }
static int ath10k_pci_chip_reset(struct ath10k *ar) static int ath10k_pci_qca988x_chip_reset(struct ath10k *ar)
{ {
int i, ret; int i, ret;
u32 val; u32 val;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset\n"); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot 988x chip reset\n");
/* Some hardware revisions (e.g. CUS223v2) have issues with cold reset.
 * It is thus preferred to use warm reset which is safer but may not be
...@@ -1881,11 +1918,53 @@ static int ath10k_pci_chip_reset(struct ath10k *ar) ...@@ -1881,11 +1918,53 @@ static int ath10k_pci_chip_reset(struct ath10k *ar)
return ret; return ret;
} }
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot chip reset complete (cold)\n"); ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca988x chip reset complete (cold)\n");
return 0; return 0;
} }
static int ath10k_pci_qca6174_chip_reset(struct ath10k *ar)
{
int ret;
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset\n");
/* FIXME: QCA6174 requires cold + warm reset to work. */
ret = ath10k_pci_cold_reset(ar);
if (ret) {
ath10k_warn(ar, "failed to cold reset: %d\n", ret);
return ret;
}
ret = ath10k_pci_wait_for_target_init(ar);
if (ret) {
ath10k_warn(ar, "failed to wait for target after cold reset: %d\n",
ret);
return ret;
}
ret = ath10k_pci_warm_reset(ar);
if (ret) {
ath10k_warn(ar, "failed to warm reset: %d\n", ret);
return ret;
}
ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot qca6174 chip reset complete (cold)\n");
return 0;
}
static int ath10k_pci_chip_reset(struct ath10k *ar)
{
if (QCA_REV_988X(ar))
return ath10k_pci_qca988x_chip_reset(ar);
else if (QCA_REV_6174(ar))
return ath10k_pci_qca6174_chip_reset(ar);
else
return -ENOTSUPP;
}
static int ath10k_pci_hif_power_up(struct ath10k *ar) static int ath10k_pci_hif_power_up(struct ath10k *ar)
{ {
int ret; int ret;
...@@ -1910,6 +1989,12 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar) ...@@ -1910,6 +1989,12 @@ static int ath10k_pci_hif_power_up(struct ath10k *ar)
*/ */
ret = ath10k_pci_chip_reset(ar); ret = ath10k_pci_chip_reset(ar);
if (ret) { if (ret) {
if (ath10k_pci_has_fw_crashed(ar)) {
ath10k_warn(ar, "firmware crashed during chip reset\n");
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
}
ath10k_err(ar, "failed to reset chip: %d\n", ret); ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_sleep; goto err_sleep;
} }
...@@ -2041,6 +2126,7 @@ static void ath10k_msi_err_tasklet(unsigned long data) ...@@ -2041,6 +2126,7 @@ static void ath10k_msi_err_tasklet(unsigned long data)
return; return;
} }
ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar); ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar); ath10k_pci_fw_crashed_dump(ar);
} }
...@@ -2110,6 +2196,7 @@ static void ath10k_pci_tasklet(unsigned long data) ...@@ -2110,6 +2196,7 @@ static void ath10k_pci_tasklet(unsigned long data)
struct ath10k_pci *ar_pci = ath10k_pci_priv(ar); struct ath10k_pci *ar_pci = ath10k_pci_priv(ar);
if (ath10k_pci_has_fw_crashed(ar)) { if (ath10k_pci_has_fw_crashed(ar)) {
ath10k_pci_irq_disable(ar);
ath10k_pci_fw_crashed_clear(ar); ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar); ath10k_pci_fw_crashed_dump(ar);
return; return;
...@@ -2352,8 +2439,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar) ...@@ -2352,8 +2439,6 @@ static int ath10k_pci_wait_for_target_init(struct ath10k *ar)
if (val & FW_IND_EVENT_PENDING) { if (val & FW_IND_EVENT_PENDING) {
ath10k_warn(ar, "device has crashed during init\n"); ath10k_warn(ar, "device has crashed during init\n");
ath10k_pci_fw_crashed_clear(ar);
ath10k_pci_fw_crashed_dump(ar);
return -ECOMM; return -ECOMM;
} }
...@@ -2507,11 +2592,23 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ...@@ -2507,11 +2592,23 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
int ret = 0; int ret = 0;
struct ath10k *ar; struct ath10k *ar;
struct ath10k_pci *ar_pci; struct ath10k_pci *ar_pci;
enum ath10k_hw_rev hw_rev;
u32 chip_id; u32 chip_id;
ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI, &ath10k_pci_hif_ops);
switch (pci_dev->device) {
case QCA988X_2_0_DEVICE_ID:
	hw_rev = ATH10K_HW_QCA988X;
	break;
case QCA6174_2_1_DEVICE_ID:
	hw_rev = ATH10K_HW_QCA6174;
	break;
default:
	WARN_ON(1);
	return -ENOTSUPP;
}

ar = ath10k_core_create(sizeof(*ar_pci), &pdev->dev, ATH10K_BUS_PCI,
			hw_rev, &ath10k_pci_hif_ops);
if (!ar) { if (!ar) {
dev_err(&pdev->dev, "failed to allocate core\n"); dev_err(&pdev->dev, "failed to allocate core\n");
return -ENOMEM; return -ENOMEM;
...@@ -2540,18 +2637,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ...@@ -2540,18 +2637,6 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_release; goto err_release;
} }
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
goto err_sleep;
}
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
goto err_sleep;
}
ret = ath10k_pci_alloc_pipes(ar); ret = ath10k_pci_alloc_pipes(ar);
if (ret) { if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n", ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
...@@ -2578,6 +2663,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev, ...@@ -2578,6 +2663,24 @@ static int ath10k_pci_probe(struct pci_dev *pdev,
goto err_deinit_irq; goto err_deinit_irq;
} }
ret = ath10k_pci_chip_reset(ar);
if (ret) {
ath10k_err(ar, "failed to reset chip: %d\n", ret);
goto err_free_irq;
}
chip_id = ath10k_pci_soc_read32(ar, SOC_CHIP_ID_ADDRESS);
if (chip_id == 0xffffffff) {
ath10k_err(ar, "failed to get chip id\n");
goto err_free_irq;
}
if (!ath10k_pci_chip_is_supported(pdev->device, chip_id)) {
ath10k_err(ar, "device %04x with chip_id %08x isn't supported\n",
pdev->device, chip_id);
goto err_sleep;
}
ath10k_pci_sleep(ar); ath10k_pci_sleep(ar);
ret = ath10k_core_register(ar, chip_id); ret = ath10k_core_register(ar, chip_id);
......
...@@ -194,7 +194,7 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar) ...@@ -194,7 +194,7 @@ static inline struct ath10k_pci *ath10k_pci_priv(struct ath10k *ar)
#define ATH10K_PCI_RX_POST_RETRY_MS 50 #define ATH10K_PCI_RX_POST_RETRY_MS 50
#define ATH_PCI_RESET_WAIT_MAX 10 /* ms */ #define ATH_PCI_RESET_WAIT_MAX 10 /* ms */
#define PCIE_WAKE_TIMEOUT 5000 /* 5ms */ #define PCIE_WAKE_TIMEOUT 10000 /* 10ms */
#define BAR_NUM 0 #define BAR_NUM 0
......
...@@ -850,7 +850,7 @@ struct rx_ppdu_start { ...@@ -850,7 +850,7 @@ struct rx_ppdu_start {
#define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15) #define RX_PPDU_END_INFO1_PPDU_DONE (1 << 15)
struct rx_ppdu_end { struct rx_ppdu_end_common {
__le32 evm_p0; __le32 evm_p0;
__le32 evm_p1; __le32 evm_p1;
__le32 evm_p2; __le32 evm_p2;
...@@ -873,10 +873,33 @@ struct rx_ppdu_end { ...@@ -873,10 +873,33 @@ struct rx_ppdu_end {
u8 phy_err_code; u8 phy_err_code;
__le16 flags; /* %RX_PPDU_END_FLAGS_ */ __le16 flags; /* %RX_PPDU_END_FLAGS_ */
__le32 info0; /* %RX_PPDU_END_INFO0_ */ __le32 info0; /* %RX_PPDU_END_INFO0_ */
} __packed;
struct rx_ppdu_end_qca988x {
__le16 bb_length; __le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */ __le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed; } __packed;
#define RX_PPDU_END_RTT_CORRELATION_VALUE_MASK 0x00ffffff
#define RX_PPDU_END_RTT_CORRELATION_VALUE_LSB 0
#define RX_PPDU_END_RTT_UNUSED_MASK 0x7f000000
#define RX_PPDU_END_RTT_UNUSED_LSB 24
#define RX_PPDU_END_RTT_NORMAL_MODE BIT(31)
struct rx_ppdu_end_qca6174 {
__le32 rtt; /* %RX_PPDU_END_RTT_ */
__le16 bb_length;
__le16 info1; /* %RX_PPDU_END_INFO1_ */
} __packed;
struct rx_ppdu_end {
struct rx_ppdu_end_common common;
union {
struct rx_ppdu_end_qca988x qca988x;
struct rx_ppdu_end_qca6174 qca6174;
} __packed;
} __packed;
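Splitting rx_ppdu_end into a common prefix plus per-chip tails keeps the shared fields in one place while letting QCA6174 carry its extra RTT word. A sketch of how a caller could pick the right tail; the helper is hypothetical and the driver's real accessors may differ.

static u16 ath10k_ppdu_end_bb_length_sketch(struct ath10k *ar,
					    struct rx_ppdu_end *ppdu)
{
	/* The union members are chip-specific; choose by hardware revision. */
	if (QCA_REV_6174(ar))
		return __le16_to_cpu(ppdu->qca6174.bb_length);

	return __le16_to_cpu(ppdu->qca988x.bb_length);
}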
/* /*
* evm_p0 * evm_p0
* EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3. * EVM for pilot 0. Contain EVM for streams: 0, 1, 2 and 3.
......
...@@ -18,6 +18,8 @@ ...@@ -18,6 +18,8 @@
#ifndef __TARGADDRS_H__ #ifndef __TARGADDRS_H__
#define __TARGADDRS_H__ #define __TARGADDRS_H__
#include "hw.h"
/* /*
* xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the * xxx_HOST_INTEREST_ADDRESS is the address in Target RAM of the
* host_interest structure. It must match the address of the _host_interest * host_interest structure. It must match the address of the _host_interest
...@@ -445,4 +447,7 @@ Fw Mode/SubMode Mask ...@@ -445,4 +447,7 @@ Fw Mode/SubMode Mask
#define QCA988X_BOARD_DATA_SZ 7168 #define QCA988X_BOARD_DATA_SZ 7168
#define QCA988X_BOARD_EXT_DATA_SZ 0 #define QCA988X_BOARD_EXT_DATA_SZ 0
#define QCA6174_BOARD_DATA_SZ 8192
#define QCA6174_BOARD_EXT_DATA_SZ 0
#endif /* __TARGADDRS_H__ */ #endif /* __TARGADDRS_H__ */
...@@ -98,7 +98,7 @@ static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev, ...@@ -98,7 +98,7 @@ static int ath10k_thermal_set_cur_dutycycle(struct thermal_cooling_device *cdev,
} }
period = max(ATH10K_QUIET_PERIOD_MIN, period = max(ATH10K_QUIET_PERIOD_MIN,
(ATH10K_QUIET_PERIOD_DEFAULT / num_bss)); (ATH10K_QUIET_PERIOD_DEFAULT / num_bss));
duration = period * (duty_cycle / 100); duration = (period * duty_cycle) / 100;
enabled = duration ? 1 : 0; enabled = duration ? 1 : 0;
ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration, ret = ath10k_wmi_pdev_set_quiet_mode(ar, period, duration,
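The duty-cycle change above matters because of integer division: duty_cycle is a percentage below 100, so the old expression truncated duty_cycle / 100 to zero before the multiply and the quiet duration always came out as 0. With illustrative numbers (not taken from the driver):

u32 period = 100, duty_cycle = 30;

/* old: period * (duty_cycle / 100) == 100 * 0    == 0   (quiet mode never enabled) */
/* new: (period * duty_cycle) / 100 == 3000 / 100 == 30  (expected 30% quiet time)  */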
...@@ -160,7 +160,8 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev, ...@@ -160,7 +160,8 @@ static ssize_t ath10k_thermal_show_temp(struct device *dev,
temperature = ar->thermal.temperature; temperature = ar->thermal.temperature;
spin_unlock_bh(&ar->data_lock); spin_unlock_bh(&ar->data_lock);
ret = snprintf(buf, PAGE_SIZE, "%d", temperature); /* display in millidegree celsius */
ret = snprintf(buf, PAGE_SIZE, "%d\n", temperature * 1000);
out: out:
mutex_unlock(&ar->conf_mutex); mutex_unlock(&ar->conf_mutex);
return ret; return ret;
...@@ -215,7 +216,7 @@ int ath10k_thermal_register(struct ath10k *ar) ...@@ -215,7 +216,7 @@ int ath10k_thermal_register(struct ath10k *ar)
/* Avoid linking error on devm_hwmon_device_register_with_groups, I /* Avoid linking error on devm_hwmon_device_register_with_groups, I
* guess linux/hwmon.h is missing proper stubs. */ * guess linux/hwmon.h is missing proper stubs. */
if (!config_enabled(HWMON)) if (!config_enabled(CONFIG_HWMON))
return 0; return 0;
hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev, hwmon_dev = devm_hwmon_device_register_with_groups(ar->dev,
......
...@@ -453,6 +453,74 @@ TRACE_EVENT(ath10k_htt_rx_desc, ...@@ -453,6 +453,74 @@ TRACE_EVENT(ath10k_htt_rx_desc,
) )
); );
TRACE_EVENT(ath10k_wmi_diag_container,
TP_PROTO(struct ath10k *ar,
u8 type,
u32 timestamp,
u32 code,
u16 len,
const void *data),
TP_ARGS(ar, type, timestamp, code, len, data),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u8, type)
__field(u32, timestamp)
__field(u32, code)
__field(u16, len)
__dynamic_array(u8, data, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->type = type;
__entry->timestamp = timestamp;
__entry->code = code;
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
TP_printk(
"%s %s diag container type %hhu timestamp %u code %u len %d",
__get_str(driver),
__get_str(device),
__entry->type,
__entry->timestamp,
__entry->code,
__entry->len
)
);
TRACE_EVENT(ath10k_wmi_diag,
TP_PROTO(struct ath10k *ar, const void *data, size_t len),
TP_ARGS(ar, data, len),
TP_STRUCT__entry(
__string(device, dev_name(ar->dev))
__string(driver, dev_driver_string(ar->dev))
__field(u16, len)
__dynamic_array(u8, data, len)
),
TP_fast_assign(
__assign_str(device, dev_name(ar->dev));
__assign_str(driver, dev_driver_string(ar->dev));
__entry->len = len;
memcpy(__get_dynamic_array(data), data, len);
),
TP_printk(
"%s %s tlv diag len %d",
__get_str(driver),
__get_str(device),
__entry->len
)
);
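Each TRACE_EVENT() above generates a matching trace_<name>() function for the driver to call. A hedged sketch of how a WMI diag handler might feed one container item into the first tracepoint; the surrounding handler is an assumption, reusing the wmi_tlv_diag_item layout defined later in this series.

static void ath10k_trace_diag_item_sketch(struct ath10k *ar,
					  const struct wmi_tlv_diag_item *item)
{
	/* Emit one diag container item to the tracepoint defined above. */
	trace_ath10k_wmi_diag_container(ar, item->type,
					__le32_to_cpu(item->timestamp),
					__le32_to_cpu(item->code),
					__le16_to_cpu(item->len),
					item->payload);
}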
#endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/ #endif /* _TRACE_H_ || TRACE_HEADER_MULTI_READ*/
/* we don't want to use include/trace/events */ /* we don't want to use include/trace/events */
......
...@@ -64,7 +64,13 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ...@@ -64,7 +64,13 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
return; return;
} }
msdu = htt->pending_tx[tx_done->msdu_id]; msdu = idr_find(&htt->pending_tx, tx_done->msdu_id);
if (!msdu) {
ath10k_warn(ar, "received tx completion for invalid msdu_id: %d\n",
tx_done->msdu_id);
return;
}
skb_cb = ATH10K_SKB_CB(msdu); skb_cb = ATH10K_SKB_CB(msdu);
dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE); dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
...@@ -95,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt, ...@@ -95,7 +101,6 @@ void ath10k_txrx_tx_unref(struct ath10k_htt *htt,
/* we do not own the msdu anymore */ /* we do not own the msdu anymore */
exit: exit:
htt->pending_tx[tx_done->msdu_id] = NULL;
ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id); ath10k_htt_tx_free_msdu_id(htt, tx_done->msdu_id);
__ath10k_htt_tx_dec_pending(htt); __ath10k_htt_tx_dec_pending(htt);
if (htt->num_pending_tx == 0) if (htt->num_pending_tx == 0)
......
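With pending_tx converted from a fixed array to an idr, the completion path above looks the frame up with idr_find() and ath10k_htt_tx_free_msdu_id() is expected to release the id. A sketch of the matching allocation side under the same tx_lock; the function name and id range are illustrative assumptions, not the exact driver code.

static int ath10k_htt_tx_alloc_msdu_id_sketch(struct ath10k_htt *htt,
					      struct sk_buff *skb)
{
	int msdu_id;

	spin_lock_bh(&htt->tx_lock);
	/* Store the skb and return a small unique id that the firmware echoes
	 * back in the tx completion (tx_done->msdu_id).
	 */
	msdu_id = idr_alloc(&htt->pending_tx, skb, 0, 0x10000, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	return msdu_id;
}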
...@@ -78,6 +78,8 @@ struct wmi_ops { ...@@ -78,6 +78,8 @@ struct wmi_ops {
const struct wmi_vdev_spectral_conf_arg *arg); const struct wmi_vdev_spectral_conf_arg *arg);
struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id, struct sk_buff *(*gen_vdev_spectral_enable)(struct ath10k *ar, u32 vdev_id,
u32 trigger, u32 enable); u32 trigger, u32 enable);
struct sk_buff *(*gen_vdev_wmm_conf)(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id, struct sk_buff *(*gen_peer_create)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]); const u8 peer_addr[ETH_ALEN]);
struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id, struct sk_buff *(*gen_peer_delete)(struct ath10k *ar, u32 vdev_id,
...@@ -102,16 +104,20 @@ struct wmi_ops { ...@@ -102,16 +104,20 @@ struct wmi_ops {
u32 value); u32 value);
struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar, struct sk_buff *(*gen_scan_chan_list)(struct ath10k *ar,
const struct wmi_scan_chan_list_arg *arg); const struct wmi_scan_chan_list_arg *arg);
struct sk_buff *(*gen_beacon_dma)(struct ath10k_vif *arvif); struct sk_buff *(*gen_beacon_dma)(struct ath10k *ar, u32 vdev_id,
const void *bcn, size_t bcn_len,
u32 bcn_paddr, bool dtim_zero,
bool deliver_cab);
struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar, struct sk_buff *(*gen_pdev_set_wmm)(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg); const struct wmi_wmm_params_all_arg *arg);
struct sk_buff *(*gen_request_stats)(struct ath10k *ar, struct sk_buff *(*gen_request_stats)(struct ath10k *ar,
enum wmi_stats_id stats_id); enum wmi_stats_id stats_id);
struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar, struct sk_buff *(*gen_force_fw_hang)(struct ath10k *ar,
enum wmi_force_fw_hang_type type, enum wmi_force_fw_hang_type type,
u32 delay_ms); u32 delay_ms);
struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb); struct sk_buff *(*gen_mgmt_tx)(struct ath10k *ar, struct sk_buff *skb);
struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable); struct sk_buff *(*gen_dbglog_cfg)(struct ath10k *ar, u32 module_enable,
u32 log_level);
struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter); struct sk_buff *(*gen_pktlog_enable)(struct ath10k *ar, u32 filter);
struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar); struct sk_buff *(*gen_pktlog_disable)(struct ath10k *ar);
struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar, struct sk_buff *(*gen_pdev_set_quiet_mode)(struct ath10k *ar,
...@@ -119,6 +125,30 @@ struct wmi_ops { ...@@ -119,6 +125,30 @@ struct wmi_ops {
u32 next_offset, u32 next_offset,
u32 enabled); u32 enabled);
struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar); struct sk_buff *(*gen_pdev_get_temperature)(struct ath10k *ar);
struct sk_buff *(*gen_addba_clear_resp)(struct ath10k *ar, u32 vdev_id,
const u8 *mac);
struct sk_buff *(*gen_addba_send)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid, u32 buf_size);
struct sk_buff *(*gen_addba_set_resp)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid,
u32 status);
struct sk_buff *(*gen_delba_send)(struct ath10k *ar, u32 vdev_id,
const u8 *mac, u32 tid, u32 initiator,
u32 reason);
struct sk_buff *(*gen_bcn_tmpl)(struct ath10k *ar, u32 vdev_id,
u32 tim_ie_offset, struct sk_buff *bcn,
u32 prb_caps, u32 prb_erp,
void *prb_ies, size_t prb_ies_len);
struct sk_buff *(*gen_prb_tmpl)(struct ath10k *ar, u32 vdev_id,
struct sk_buff *bcn);
struct sk_buff *(*gen_p2p_go_bcn_ie)(struct ath10k *ar, u32 vdev_id,
const u8 *p2p_ie);
struct sk_buff *(*gen_vdev_sta_uapsd)(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
const struct wmi_sta_uapsd_auto_trig_arg *args,
u32 num_ac);
struct sk_buff *(*gen_sta_keepalive)(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg);
}; };
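struct wmi_ops is filled in per firmware flavour, and most of the inline wrappers below check for a missing op and return -EOPNOTSUPP rather than dereferencing NULL. A hedged fragment of what a WMI-TLV op table could look like; the implementation symbols are placeholders, not necessarily the names added by this series.

static const struct wmi_ops wmi_tlv_ops_sketch = {
	.gen_vdev_wmm_conf	= ath10k_wmi_tlv_op_gen_vdev_wmm_conf,
	.gen_bcn_tmpl		= ath10k_wmi_tlv_op_gen_bcn_tmpl,
	.gen_prb_tmpl		= ath10k_wmi_tlv_op_gen_prb_tmpl,
	.gen_p2p_go_bcn_ie	= ath10k_wmi_tlv_op_gen_p2p_go_bcn_ie,
	.gen_sta_keepalive	= ath10k_wmi_tlv_op_gen_sta_keepalive,
	/* ops left NULL are reported as -EOPNOTSUPP by the inline wrappers */
};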
int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id); int ath10k_wmi_cmd_send(struct ath10k *ar, struct sk_buff *skb, u32 cmd_id);
...@@ -557,6 +587,42 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger, ...@@ -557,6 +587,42 @@ ath10k_wmi_vdev_spectral_enable(struct ath10k *ar, u32 vdev_id, u32 trigger,
return ath10k_wmi_cmd_send(ar, skb, cmd_id); return ath10k_wmi_cmd_send(ar, skb, cmd_id);
} }
static inline int
ath10k_wmi_vdev_sta_uapsd(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN],
const struct wmi_sta_uapsd_auto_trig_arg *args,
u32 num_ac)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_vdev_sta_uapsd)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_vdev_sta_uapsd(ar, vdev_id, peer_addr, args,
num_ac);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->sta_uapsd_auto_trig_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int
ath10k_wmi_vdev_wmm_conf(struct ath10k *ar, u32 vdev_id,
const struct wmi_wmm_params_all_arg *arg)
{
struct sk_buff *skb;
u32 cmd_id;
skb = ar->wmi.ops->gen_vdev_wmm_conf(ar, vdev_id, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->vdev_set_wmm_params_cmdid;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
static inline int static inline int
ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id, ath10k_wmi_peer_create(struct ath10k *ar, u32 vdev_id,
const u8 peer_addr[ETH_ALEN]) const u8 peer_addr[ETH_ALEN])
...@@ -706,16 +772,19 @@ ath10k_wmi_peer_assoc(struct ath10k *ar, ...@@ -706,16 +772,19 @@ ath10k_wmi_peer_assoc(struct ath10k *ar,
} }
static inline int static inline int
ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) ath10k_wmi_beacon_send_ref_nowait(struct ath10k *ar, u32 vdev_id,
const void *bcn, size_t bcn_len,
u32 bcn_paddr, bool dtim_zero,
bool deliver_cab)
{ {
struct ath10k *ar = arvif->ar;
struct sk_buff *skb; struct sk_buff *skb;
int ret; int ret;
if (!ar->wmi.ops->gen_beacon_dma) if (!ar->wmi.ops->gen_beacon_dma)
return -EOPNOTSUPP; return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_beacon_dma(arvif); skb = ar->wmi.ops->gen_beacon_dma(ar, vdev_id, bcn, bcn_len, bcn_paddr,
dtim_zero, deliver_cab);
if (IS_ERR(skb)) if (IS_ERR(skb))
return PTR_ERR(skb); return PTR_ERR(skb);
...@@ -731,7 +800,7 @@ ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif) ...@@ -731,7 +800,7 @@ ath10k_wmi_beacon_send_ref_nowait(struct ath10k_vif *arvif)
static inline int static inline int
ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar, ath10k_wmi_pdev_set_wmm_params(struct ath10k *ar,
const struct wmi_pdev_set_wmm_params_arg *arg) const struct wmi_wmm_params_all_arg *arg)
{ {
struct sk_buff *skb; struct sk_buff *skb;
...@@ -778,14 +847,14 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar, ...@@ -778,14 +847,14 @@ ath10k_wmi_force_fw_hang(struct ath10k *ar,
} }
static inline int static inline int
ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable) ath10k_wmi_dbglog_cfg(struct ath10k *ar, u32 module_enable, u32 log_level)
{ {
struct sk_buff *skb; struct sk_buff *skb;
if (!ar->wmi.ops->gen_dbglog_cfg) if (!ar->wmi.ops->gen_dbglog_cfg)
return -EOPNOTSUPP; return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable); skb = ar->wmi.ops->gen_dbglog_cfg(ar, module_enable, log_level);
if (IS_ERR(skb)) if (IS_ERR(skb))
return PTR_ERR(skb); return PTR_ERR(skb);
...@@ -857,4 +926,139 @@ ath10k_wmi_pdev_get_temperature(struct ath10k *ar) ...@@ -857,4 +926,139 @@ ath10k_wmi_pdev_get_temperature(struct ath10k *ar)
ar->wmi.cmd->pdev_get_temperature_cmdid); ar->wmi.cmd->pdev_get_temperature_cmdid);
} }
static inline int
ath10k_wmi_addba_clear_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_clear_resp)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_clear_resp(ar, vdev_id, mac);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_clear_resp_cmdid);
}
static inline int
ath10k_wmi_addba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 buf_size)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_send)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_send(ar, vdev_id, mac, tid, buf_size);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_send_cmdid);
}
static inline int
ath10k_wmi_addba_set_resp(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 status)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_addba_set_resp)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_addba_set_resp(ar, vdev_id, mac, tid, status);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->addba_set_resp_cmdid);
}
static inline int
ath10k_wmi_delba_send(struct ath10k *ar, u32 vdev_id, const u8 *mac,
u32 tid, u32 initiator, u32 reason)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_delba_send)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_delba_send(ar, vdev_id, mac, tid, initiator,
reason);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb,
ar->wmi.cmd->delba_send_cmdid);
}
static inline int
ath10k_wmi_bcn_tmpl(struct ath10k *ar, u32 vdev_id, u32 tim_ie_offset,
struct sk_buff *bcn, u32 prb_caps, u32 prb_erp,
void *prb_ies, size_t prb_ies_len)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_bcn_tmpl)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_bcn_tmpl(ar, vdev_id, tim_ie_offset, bcn,
prb_caps, prb_erp, prb_ies,
prb_ies_len);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->bcn_tmpl_cmdid);
}
static inline int
ath10k_wmi_prb_tmpl(struct ath10k *ar, u32 vdev_id, struct sk_buff *prb)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_prb_tmpl)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_prb_tmpl(ar, vdev_id, prb);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->prb_tmpl_cmdid);
}
static inline int
ath10k_wmi_p2p_go_bcn_ie(struct ath10k *ar, u32 vdev_id, const u8 *p2p_ie)
{
struct sk_buff *skb;
if (!ar->wmi.ops->gen_p2p_go_bcn_ie)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_p2p_go_bcn_ie(ar, vdev_id, p2p_ie);
if (IS_ERR(skb))
return PTR_ERR(skb);
return ath10k_wmi_cmd_send(ar, skb, ar->wmi.cmd->p2p_go_set_beacon_ie);
}
static inline int
ath10k_wmi_sta_keepalive(struct ath10k *ar,
const struct wmi_sta_keepalive_arg *arg)
{
struct sk_buff *skb;
u32 cmd_id;
if (!ar->wmi.ops->gen_sta_keepalive)
return -EOPNOTSUPP;
skb = ar->wmi.ops->gen_sta_keepalive(ar, arg);
if (IS_ERR(skb))
return PTR_ERR(skb);
cmd_id = ar->wmi.cmd->sta_keepalive_cmd;
return ath10k_wmi_cmd_send(ar, skb, cmd_id);
}
#endif #endif
...@@ -1302,6 +1302,10 @@ struct wmi_tlv_pdev_set_wmm_cmd { ...@@ -1302,6 +1302,10 @@ struct wmi_tlv_pdev_set_wmm_cmd {
__le32 dg_type; /* no idea.. */ __le32 dg_type; /* no idea.. */
} __packed; } __packed;
struct wmi_tlv_vdev_set_wmm_cmd {
__le32 vdev_id;
} __packed;
struct wmi_tlv_phyerr_ev { struct wmi_tlv_phyerr_ev {
__le32 num_phyerrs; __le32 num_phyerrs;
__le32 tsf_l32; __le32 tsf_l32;
...@@ -1375,6 +1379,66 @@ struct wmi_tlv_pktlog_disable { ...@@ -1375,6 +1379,66 @@ struct wmi_tlv_pktlog_disable {
__le32 reserved; __le32 reserved;
} __packed; } __packed;
enum wmi_tlv_bcn_tx_status {
WMI_TLV_BCN_TX_STATUS_OK,
WMI_TLV_BCN_TX_STATUS_XRETRY,
WMI_TLV_BCN_TX_STATUS_DROP,
WMI_TLV_BCN_TX_STATUS_FILTERED,
};
struct wmi_tlv_bcn_tx_status_ev {
__le32 vdev_id;
__le32 tx_status;
} __packed;
struct wmi_tlv_bcn_prb_info {
__le32 caps;
__le32 erp;
u8 ies[0];
} __packed;
struct wmi_tlv_bcn_tmpl_cmd {
__le32 vdev_id;
__le32 tim_ie_offset;
__le32 buf_len;
} __packed;
struct wmi_tlv_prb_tmpl_cmd {
__le32 vdev_id;
__le32 buf_len;
} __packed;
struct wmi_tlv_p2p_go_bcn_ie {
__le32 vdev_id;
__le32 ie_len;
} __packed;
enum wmi_tlv_diag_item_type {
WMI_TLV_DIAG_ITEM_TYPE_FW_EVENT,
WMI_TLV_DIAG_ITEM_TYPE_FW_LOG,
WMI_TLV_DIAG_ITEM_TYPE_FW_DEBUG_MSG,
};
struct wmi_tlv_diag_item {
u8 type;
u8 reserved;
__le16 len;
__le32 timestamp;
__le32 code;
u8 payload[0];
} __packed;
struct wmi_tlv_diag_data_ev {
__le32 num_items;
} __packed;
struct wmi_tlv_sta_keepalive_cmd {
__le32 vdev_id;
__le32 enabled;
__le32 method; /* WMI_STA_KEEPALIVE_METHOD_ */
__le32 interval; /* in seconds */
} __packed;
void ath10k_wmi_tlv_attach(struct ath10k *ar); void ath10k_wmi_tlv_attach(struct ath10k *ar);
#endif #endif
...@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags) ...@@ -478,7 +478,7 @@ ath5k_hw_wisoc_reset(struct ath5k_hw *ah, u32 flags)
regval = ioread32(reg); regval = ioread32(reg);
iowrite32(regval | val, reg); iowrite32(regval | val, reg);
regval = ioread32(reg); regval = ioread32(reg);
usleep_range(100, 150); udelay(100); /* NB: should be atomic */
/* Bring BB/MAC out of reset */ /* Bring BB/MAC out of reset */
iowrite32(regval & ~val, reg); iowrite32(regval & ~val, reg);
......
...@@ -1203,24 +1203,41 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah) ...@@ -1203,24 +1203,41 @@ static void ar9003_hw_tx_iq_cal_reload(struct ath_hw *ah)
static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g) static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
{ {
int offset[8] = {0}, total = 0, test; int offset[8] = {0}, total = 0, test;
int agc_out, i; int agc_out, i, peak_detect_threshold;
if (AR_SREV_9550(ah) || AR_SREV_9531(ah))
peak_detect_threshold = 8;
else
peak_detect_threshold = 0;
/*
* Turn off LNA/SW.
*/
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1); AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0); AR_PHY_65NM_RXRF_GAINSTAGES_LNAON_CALDC, 0x0);
if (is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
else
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9330_11(ah)) {
if (is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_LNA2G_GAIN_OVR, 0x0);
else
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_LNA5G_GAIN_OVR, 0x0);
}
/*
* Turn off RXON.
*/
REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
AR_PHY_65NM_RXTX2_RXON_OVR, 0x1); AR_PHY_65NM_RXTX2_RXON_OVR, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
AR_PHY_65NM_RXTX2_RXON, 0x0); AR_PHY_65NM_RXTX2_RXON, 0x0);
/*
* Turn on AGC for cal.
*/
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1); AR_PHY_65NM_RXRF_AGC_AGC_OVERRIDE, 0x1);
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
...@@ -1228,16 +1245,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g) ...@@ -1228,16 +1245,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1); AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0x1);
if (AR_SREV_9330_11(ah)) { if (AR_SREV_9330_11(ah))
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0); AR_PHY_65NM_RXRF_AGC_AGC2G_CALDAC_OVR, 0x0);
} else {
if (AR_SREV_9003_PCOEM(ah) || AR_SREV_9550(ah) || AR_SREV_9531(ah)) {
if (is_2g) if (is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR, 0x0); AR_PHY_65NM_RXRF_AGC_AGC2G_DBDAC_OVR,
peak_detect_threshold);
else else
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR, 0x0); AR_PHY_65NM_RXRF_AGC_AGC5G_DBDAC_OVR,
peak_detect_threshold);
} }
for (i = 6; i > 0; i--) { for (i = 6; i > 0; i--) {
...@@ -1266,10 +1286,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g) ...@@ -1266,10 +1286,19 @@ static void ar9003_hw_manual_peak_cal(struct ath_hw *ah, u8 chain, bool is_2g)
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total); AR_PHY_65NM_RXRF_AGC_AGC5G_CALDAC_OVR, total);
/*
* Turn on LNA.
*/
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_GAINSTAGES(chain),
AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0); AR_PHY_65NM_RXRF_GAINSTAGES_RX_OVERRIDE, 0);
/*
* Turn off RXON.
*/
REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXTX2(chain),
AR_PHY_65NM_RXTX2_RXON_OVR, 0); AR_PHY_65NM_RXTX2_RXON_OVR, 0);
/*
* Turn off peak detect calibration.
*/
REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain), REG_RMW_FIELD(ah, AR_PHY_65NM_RXRF_AGC(chain),
AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0); AR_PHY_65NM_RXRF_AGC_AGC_CAL_OVR, 0);
} }
...@@ -1611,8 +1640,14 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah, ...@@ -1611,8 +1640,14 @@ static bool ar9003_hw_init_cal_soc(struct ath_hw *ah,
skip_tx_iqcal: skip_tx_iqcal:
if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) { if (run_agc_cal || !(ah->ah_flags & AH_FASTCC)) {
if (AR_SREV_9330_11(ah)) if (AR_SREV_9330_11(ah) || AR_SREV_9531(ah) || AR_SREV_9550(ah)) {
ar9003_hw_manual_peak_cal(ah, 0, IS_CHAN_2GHZ(chan)); for (i = 0; i < AR9300_MAX_CHAINS; i++) {
if (!(ah->rxchainmask & (1 << i)))
continue;
ar9003_hw_manual_peak_cal(ah, i,
IS_CHAN_2GHZ(chan));
}
}
/* /*
* For non-AR9550 chips, we just trigger AGC calibration * For non-AR9550 chips, we just trigger AGC calibration
......
...@@ -358,7 +358,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = { ...@@ -358,7 +358,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = {
{0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c}, {0x00009e1c, 0x0001cf9c, 0x0001cf9c, 0x00021f9c, 0x00021f9c},
{0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce}, {0x00009e20, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce},
{0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021}, {0x00009e2c, 0x0000001c, 0x0000001c, 0x00000021, 0x00000021},
{0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10822, 0xcfa10822}, {0x00009e3c, 0xcfa10820, 0xcfa10820, 0xcfa10820, 0xcfa10820},
{0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27}, {0x00009e44, 0xfe321e27, 0xfe321e27, 0xfe291e27, 0xfe291e27},
{0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012}, {0x00009e48, 0x5030201a, 0x5030201a, 0x50302012, 0x50302012},
{0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000}, {0x00009fc8, 0x0003f000, 0x0003f000, 0x0001a000, 0x0001a000},
...@@ -378,7 +378,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = { ...@@ -378,7 +378,7 @@ static const u32 qca953x_1p0_baseband_postamble[][5] = {
{0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010}, {0x0000a284, 0x00000000, 0x00000000, 0x00000010, 0x00000010},
{0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110}, {0x0000a288, 0x00000110, 0x00000110, 0x00000110, 0x00000110},
{0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222}, {0x0000a28c, 0x00022222, 0x00022222, 0x00022222, 0x00022222},
{0x0000a2c4, 0x00158d18, 0x00158d18, 0x00158d18, 0x00158d18}, {0x0000a2c4, 0x00158d18, 0x00158d18, 0x00058d18, 0x00058d18},
{0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33}, {0x0000a2cc, 0x18c50033, 0x18c43433, 0x18c41033, 0x18c44c33},
{0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982}, {0x0000a2d0, 0x00041982, 0x00041982, 0x00041982, 0x00041982},
{0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b}, {0x0000a2d8, 0x7999a83b, 0x7999a83b, 0x7999a83b, 0x7999a83b},
......
...@@ -582,7 +582,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = { ...@@ -582,7 +582,7 @@ static struct rchan_callbacks rfs_spec_scan_cb = {
void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv) void ath9k_cmn_spectral_deinit_debug(struct ath_spec_scan_priv *spec_priv)
{ {
if (config_enabled(CONFIG_ATH9K_DEBUGFS) && spec_priv->rfs_chan_spec_scan) { if (config_enabled(CONFIG_ATH9K_DEBUGFS)) {
relay_close(spec_priv->rfs_chan_spec_scan); relay_close(spec_priv->rfs_chan_spec_scan);
spec_priv->rfs_chan_spec_scan = NULL; spec_priv->rfs_chan_spec_scan = NULL;
} }
......