Commit c2786e4a authored by John W. Linville's avatar John W. Linville

Merge branch 'for-linville' of git://github.com/kvalo/ath6kl

parents 80652480 d97c121b
......@@ -25,7 +25,8 @@
obj-$(CONFIG_ATH6KL) += ath6kl_core.o
ath6kl_core-y += debug.o
ath6kl_core-y += hif.o
ath6kl_core-y += htc.o
ath6kl_core-y += htc_mbox.o
ath6kl_core-y += htc_pipe.o
ath6kl_core-y += bmi.o
ath6kl_core-y += cfg80211.o
ath6kl_core-y += init.o
......
......@@ -51,6 +51,8 @@
.max_power = 30, \
}
#define DEFAULT_BG_SCAN_PERIOD 60
static struct ieee80211_rate ath6kl_rates[] = {
RATETAB_ENT(10, 0x1, 0),
RATETAB_ENT(20, 0x2, 0),
......@@ -71,7 +73,8 @@ static struct ieee80211_rate ath6kl_rates[] = {
#define ath6kl_g_rates (ath6kl_rates + 0)
#define ath6kl_g_rates_size 12
#define ath6kl_g_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
#define ath6kl_g_htcap IEEE80211_HT_CAP_SGI_20
#define ath6kl_a_htcap (IEEE80211_HT_CAP_SUP_WIDTH_20_40 | \
IEEE80211_HT_CAP_SGI_20 | \
IEEE80211_HT_CAP_SGI_40)
......@@ -128,7 +131,7 @@ static struct ieee80211_supported_band ath6kl_band_5ghz = {
.channels = ath6kl_5ghz_a_channels,
.n_bitrates = ath6kl_a_rates_size,
.bitrates = ath6kl_a_rates,
.ht_cap.cap = ath6kl_g_htcap,
.ht_cap.cap = ath6kl_a_htcap,
.ht_cap.ht_supported = true,
};
......@@ -609,6 +612,17 @@ static int ath6kl_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
vif->req_bssid, vif->ch_hint,
ar->connect_ctrl_flags, nw_subtype);
/* disable background scan if period is 0 */
if (sme->bg_scan_period == 0)
sme->bg_scan_period = 0xffff;
/* configure default value if not specified */
if (sme->bg_scan_period == -1)
sme->bg_scan_period = DEFAULT_BG_SCAN_PERIOD;
ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx, 0, 0,
sme->bg_scan_period, 0, 0, 0, 3, 0, 0, 0);
up(&ar->sem);
if (status == -EINVAL) {
......@@ -943,6 +957,8 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
if (test_bit(CONNECTED, &vif->flags))
force_fg_scan = 1;
vif->scan_req = request;
if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
ar->fw_capabilities)) {
/*
......@@ -965,10 +981,10 @@ static int ath6kl_cfg80211_scan(struct wiphy *wiphy, struct net_device *ndev,
ATH6KL_FG_SCAN_INTERVAL,
n_channels, channels);
}
if (ret)
if (ret) {
ath6kl_err("wmi_startscan_cmd failed\n");
else
vif->scan_req = request;
vif->scan_req = NULL;
}
kfree(channels);
......@@ -1438,9 +1454,38 @@ static int ath6kl_cfg80211_change_iface(struct wiphy *wiphy,
struct vif_params *params)
{
struct ath6kl_vif *vif = netdev_priv(ndev);
int i;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: type %u\n", __func__, type);
/*
* Don't bring up p2p on an interface which is not initialized
* for p2p operation where fw does not have capability to switch
* dynamically between non-p2p and p2p type interface.
*/
if (!test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
vif->ar->fw_capabilities) &&
(type == NL80211_IFTYPE_P2P_CLIENT ||
type == NL80211_IFTYPE_P2P_GO)) {
if (vif->ar->vif_max == 1) {
if (vif->fw_vif_idx != 0)
return -EINVAL;
else
goto set_iface_type;
}
for (i = vif->ar->max_norm_iface; i < vif->ar->vif_max; i++) {
if (i == vif->fw_vif_idx)
break;
}
if (i == vif->ar->vif_max) {
ath6kl_err("Invalid interface to bring up P2P\n");
return -EINVAL;
}
}
set_iface_type:
switch (type) {
case NL80211_IFTYPE_STATION:
vif->next_mode = INFRA_NETWORK;
......@@ -1926,12 +1971,61 @@ static int ath6kl_wow_sta(struct ath6kl *ar, struct ath6kl_vif *vif)
return 0;
}
/* Nonzero once firmware has flagged the host-sleep-mode command as processed. */
static int is_hsleep_mode_procsed(struct ath6kl_vif *vif)
{
	if (test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags))
		return 1;

	return 0;
}
/* True when no TX packets remain pending on the WMI control endpoint. */
static bool is_ctrl_ep_empty(struct ath6kl *ar)
{
	return ar->tx_pending[ar->ctrl_ep] == 0;
}
/*
 * Put the firmware into host-sleep mode.
 *
 * Issues the WMI host-sleep command, waits for the firmware event
 * confirming the command was processed, then waits for any pending
 * control-endpoint TX traffic to drain.
 *
 * Returns 0 on success, -ETIMEDOUT if either wait times out, or a
 * negative error code if the WMI command fails or the wait is
 * interrupted.
 */
static int ath6kl_cfg80211_host_sleep(struct ath6kl *ar, struct ath6kl_vif *vif)
{
	int ret, left;

	/* cleared here; presumably set again by the WMI event handler
	 * when the firmware acks the command — confirm against wmi.c */
	clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);

	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
						 ATH6KL_HOST_MODE_ASLEEP);
	if (ret)
		return ret;

	left = wait_event_interruptible_timeout(ar->event_wq,
						is_hsleep_mode_procsed(vif),
						WMI_TIMEOUT);
	if (left == 0) {
		ath6kl_warn("timeout, didn't get host sleep cmd processed event\n");
		ret = -ETIMEDOUT;
	} else if (left < 0) {
		ath6kl_warn("error while waiting for host sleep cmd processed event %d\n",
			    left);
		ret = left;
	}

	/* also drain any control-endpoint packets still queued for TX */
	if (ar->tx_pending[ar->ctrl_ep]) {
		left = wait_event_interruptible_timeout(ar->event_wq,
							is_ctrl_ep_empty(ar),
							WMI_TIMEOUT);
		if (left == 0) {
			ath6kl_warn("clear wmi ctrl data timeout\n");
			ret = -ETIMEDOUT;
		} else if (left < 0) {
			ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
			ret = left;
		}
	}

	return ret;
}
static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
{
struct in_device *in_dev;
struct in_ifaddr *ifa;
struct ath6kl_vif *vif;
int ret, left;
int ret;
u32 filter = 0;
u16 i, bmiss_time;
u8 index = 0;
......@@ -2032,39 +2126,11 @@ static int ath6kl_wow_suspend(struct ath6kl *ar, struct cfg80211_wowlan *wow)
if (ret)
return ret;
clear_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags);
ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
ATH6KL_HOST_MODE_ASLEEP);
ret = ath6kl_cfg80211_host_sleep(ar, vif);
if (ret)
return ret;
left = wait_event_interruptible_timeout(ar->event_wq,
test_bit(HOST_SLEEP_MODE_CMD_PROCESSED, &vif->flags),
WMI_TIMEOUT);
if (left == 0) {
ath6kl_warn("timeout, didn't get host sleep cmd "
"processed event\n");
ret = -ETIMEDOUT;
} else if (left < 0) {
ath6kl_warn("error while waiting for host sleep cmd "
"processed event %d\n", left);
ret = left;
}
if (ar->tx_pending[ar->ctrl_ep]) {
left = wait_event_interruptible_timeout(ar->event_wq,
ar->tx_pending[ar->ctrl_ep] == 0, WMI_TIMEOUT);
if (left == 0) {
ath6kl_warn("clear wmi ctrl data timeout\n");
ret = -ETIMEDOUT;
} else if (left < 0) {
ath6kl_warn("clear wmi ctrl data failed: %d\n", left);
ret = left;
}
}
return ret;
return 0;
}
static int ath6kl_wow_resume(struct ath6kl *ar)
......@@ -2111,10 +2177,82 @@ static int ath6kl_wow_resume(struct ath6kl *ar)
return 0;
}
/*
 * Prepare the chip for deep-sleep suspend: stop all activity, switch
 * to REC_POWER power mode, disable WOW, flush pending TX data and
 * finally put the firmware into host sleep.
 *
 * Returns 0 on success or a negative error code from the first step
 * that fails; earlier steps are not rolled back on failure.
 */
static int ath6kl_cfg80211_deepsleep_suspend(struct ath6kl *ar)
{
	struct ath6kl_vif *vif;
	int ret;

	vif = ath6kl_vif_first(ar);
	if (!vif)
		return -EIO;

	if (!ath6kl_cfg80211_ready(vif))
		return -EIO;

	ath6kl_cfg80211_stop_all(ar);

	/* Save the current power mode before enabling power save;
	 * restored by ath6kl_cfg80211_deepsleep_resume() */
	ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;

	ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
	if (ret)
		return ret;

	/* Disable WOW mode */
	ret = ath6kl_wmi_set_wow_mode_cmd(ar->wmi, vif->fw_vif_idx,
					  ATH6KL_WOW_MODE_DISABLE,
					  0, 0);
	if (ret)
		return ret;

	/* Flush all non control pkts in TX path */
	ath6kl_tx_data_cleanup(ar);

	ret = ath6kl_cfg80211_host_sleep(ar, vif);
	if (ret)
		return ret;

	return 0;
}
/*
 * Undo a deep-sleep suspend: restore the power mode saved at suspend
 * time, wake the firmware, mark the device ON and reset the scan
 * parameters to their defaults.
 *
 * Returns 0 on success or the error from the first failing WMI command.
 */
static int ath6kl_cfg80211_deepsleep_resume(struct ath6kl *ar)
{
	struct ath6kl_vif *vif;
	int ret;

	vif = ath6kl_vif_first(ar);
	if (!vif)
		return -EIO;

	/* restore the pre-suspend power mode only if suspend changed it */
	if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
		ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
					       ar->wmi->saved_pwr_mode);
		if (ret)
			return ret;
	}

	ret = ath6kl_wmi_set_host_sleep_mode_cmd(ar->wmi, vif->fw_vif_idx,
						 ATH6KL_HOST_MODE_AWAKE);
	if (ret)
		return ret;

	ar->state = ATH6KL_STATE_ON;

	/* Reset scan parameter to default values */
	ret = ath6kl_wmi_scanparams_cmd(ar->wmi, vif->fw_vif_idx,
					0, 0, 0, 0, 0, 0, 3, 0, 0, 0);
	if (ret)
		return ret;

	return 0;
}
int ath6kl_cfg80211_suspend(struct ath6kl *ar,
enum ath6kl_cfg_suspend_mode mode,
struct cfg80211_wowlan *wow)
{
struct ath6kl_vif *vif;
enum ath6kl_state prev_state;
int ret;
......@@ -2139,15 +2277,12 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
case ATH6KL_CFG_SUSPEND_DEEPSLEEP:
ath6kl_cfg80211_stop_all(ar);
/* save the current power mode before enabling power save */
ar->wmi->saved_pwr_mode = ar->wmi->pwr_mode;
ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep suspend\n");
ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0, REC_POWER);
ret = ath6kl_cfg80211_deepsleep_suspend(ar);
if (ret) {
ath6kl_warn("wmi powermode command failed during suspend: %d\n",
ret);
ath6kl_err("deepsleep suspend failed: %d\n", ret);
return ret;
}
ar->state = ATH6KL_STATE_DEEPSLEEP;
......@@ -2187,6 +2322,9 @@ int ath6kl_cfg80211_suspend(struct ath6kl *ar,
break;
}
list_for_each_entry(vif, &ar->vif_list, list)
ath6kl_cfg80211_scan_complete_event(vif, true);
return 0;
}
EXPORT_SYMBOL(ath6kl_cfg80211_suspend);
......@@ -2208,17 +2346,13 @@ int ath6kl_cfg80211_resume(struct ath6kl *ar)
break;
case ATH6KL_STATE_DEEPSLEEP:
if (ar->wmi->pwr_mode != ar->wmi->saved_pwr_mode) {
ret = ath6kl_wmi_powermode_cmd(ar->wmi, 0,
ar->wmi->saved_pwr_mode);
if (ret) {
ath6kl_warn("wmi powermode command failed during resume: %d\n",
ret);
}
}
ar->state = ATH6KL_STATE_ON;
ath6kl_dbg(ATH6KL_DBG_SUSPEND, "deep sleep resume\n");
ret = ath6kl_cfg80211_deepsleep_resume(ar);
if (ret) {
ath6kl_warn("deep sleep resume failed: %d\n", ret);
return ret;
}
break;
case ATH6KL_STATE_CUTPOWER:
......@@ -2292,31 +2426,25 @@ void ath6kl_check_wow_status(struct ath6kl *ar)
}
#endif
static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
static int ath6kl_set_htcap(struct ath6kl_vif *vif, enum ieee80211_band band,
bool ht_enable)
{
struct ath6kl_vif *vif;
/*
* 'dev' could be NULL if a channel change is required for the hardware
* device itself, instead of a particular VIF.
*
* FIXME: To be handled properly when monitor mode is supported.
*/
if (!dev)
return -EBUSY;
vif = netdev_priv(dev);
struct ath6kl_htcap *htcap = &vif->htcap;
if (!ath6kl_cfg80211_ready(vif))
return -EIO;
if (htcap->ht_enable == ht_enable)
return 0;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
__func__, chan->center_freq, chan->hw_value);
vif->next_chan = chan->center_freq;
if (ht_enable) {
/* Set default ht capabilities */
htcap->ht_enable = true;
htcap->cap_info = (band == IEEE80211_BAND_2GHZ) ?
ath6kl_g_htcap : ath6kl_a_htcap;
htcap->ampdu_factor = IEEE80211_HT_MAX_AMPDU_16K;
} else /* Disable ht */
memset(htcap, 0, sizeof(*htcap));
return 0;
return ath6kl_wmi_set_htcap_cmd(vif->ar->wmi, vif->fw_vif_idx,
band, htcap);
}
static bool ath6kl_is_p2p_ie(const u8 *pos)
......@@ -2393,6 +2521,81 @@ static int ath6kl_set_ies(struct ath6kl_vif *vif,
return 0;
}
static int ath6kl_set_channel(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan,
enum nl80211_channel_type channel_type)
{
struct ath6kl_vif *vif;
/*
* 'dev' could be NULL if a channel change is required for the hardware
* device itself, instead of a particular VIF.
*
* FIXME: To be handled properly when monitor mode is supported.
*/
if (!dev)
return -EBUSY;
vif = netdev_priv(dev);
if (!ath6kl_cfg80211_ready(vif))
return -EIO;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s: center_freq=%u hw_value=%u\n",
__func__, chan->center_freq, chan->hw_value);
vif->next_chan = chan->center_freq;
vif->next_ch_type = channel_type;
vif->next_ch_band = chan->band;
return 0;
}
/*
 * Extract the 2-byte RSN capabilities field from the RSN IE in the
 * beacon tail.
 *
 * Returns 0 and copies the capability bytes into @rsn_capab, or
 * -EINVAL if the beacon has no tail, no RSN IE is present, or the IE
 * is too short / malformed.
 */
static int ath6kl_get_rsn_capab(struct cfg80211_beacon_data *beacon,
				u8 *rsn_capab)
{
	const u8 *rsn_ie;
	size_t rsn_ie_len;
	u16 cnt;

	if (!beacon->tail)
		return -EINVAL;

	rsn_ie = cfg80211_find_ie(WLAN_EID_RSN, beacon->tail, beacon->tail_len);
	if (!rsn_ie)
		return -EINVAL;

	rsn_ie_len = *(rsn_ie + 1);
	/* skip element id and length */
	rsn_ie += 2;

	/* skip version, group cipher */
	if (rsn_ie_len < 6)
		return -EINVAL;
	rsn_ie += 6;
	rsn_ie_len -= 6;

	/* skip pairwise cipher suite */
	if (rsn_ie_len < 2)
		return -EINVAL;
	/* suite counts are little-endian and may be unaligned; do not
	 * dereference through a u16 pointer */
	cnt = rsn_ie[0] | (rsn_ie[1] << 8);
	/* bound-check before advancing: the IE-supplied count is
	 * untrusted, and an unchecked subtraction would wrap the
	 * size_t length and defeat the later '< 2' checks */
	if (rsn_ie_len < 2 + cnt * 4)
		return -EINVAL;
	rsn_ie += (2 + cnt * 4);
	rsn_ie_len -= (2 + cnt * 4);

	/* skip akm suite */
	if (rsn_ie_len < 2)
		return -EINVAL;
	cnt = rsn_ie[0] | (rsn_ie[1] << 8);
	if (rsn_ie_len < 2 + cnt * 4)
		return -EINVAL;
	rsn_ie += (2 + cnt * 4);
	rsn_ie_len -= (2 + cnt * 4);

	if (rsn_ie_len < 2)
		return -EINVAL;

	memcpy(rsn_capab, rsn_ie, 2);

	return 0;
}
static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_ap_settings *info)
{
......@@ -2405,6 +2608,7 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
struct wmi_connect_cmd p;
int res;
int i, ret;
u16 rsn_capab = 0;
ath6kl_dbg(ATH6KL_DBG_WLAN_CFG, "%s:\n", __func__);
......@@ -2534,6 +2738,34 @@ static int ath6kl_start_ap(struct wiphy *wiphy, struct net_device *dev,
p.nw_subtype = SUBTYPE_NONE;
}
if (info->inactivity_timeout) {
res = ath6kl_wmi_set_inact_period(ar->wmi, vif->fw_vif_idx,
info->inactivity_timeout);
if (res < 0)
return res;
}
if (ath6kl_set_htcap(vif, vif->next_ch_band,
vif->next_ch_type != NL80211_CHAN_NO_HT))
return -EIO;
/*
* Get the PTKSA replay counter in the RSN IE. Supplicant
* will use the RSN IE in M3 message and firmware has to
* advertise the same in beacon/probe response. Send
* the complete RSN IE capability field to firmware
*/
if (!ath6kl_get_rsn_capab(&info->beacon, (u8 *) &rsn_capab) &&
test_bit(ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
ar->fw_capabilities)) {
res = ath6kl_wmi_set_ie_cmd(ar->wmi, vif->fw_vif_idx,
WLAN_EID_RSN, WMI_RSN_IE_CAPB,
(const u8 *) &rsn_capab,
sizeof(rsn_capab));
if (res < 0)
return res;
}
res = ath6kl_wmi_ap_profile_commit(ar->wmi, vif->fw_vif_idx, &p);
if (res < 0)
return res;
......@@ -2568,6 +2800,13 @@ static int ath6kl_stop_ap(struct wiphy *wiphy, struct net_device *dev)
ath6kl_wmi_disconnect_cmd(ar->wmi, vif->fw_vif_idx);
clear_bit(CONNECTED, &vif->flags);
/* Restore ht setting in firmware */
if (ath6kl_set_htcap(vif, IEEE80211_BAND_2GHZ, true))
return -EIO;
if (ath6kl_set_htcap(vif, IEEE80211_BAND_5GHZ, true))
return -EIO;
return 0;
}
......@@ -2749,6 +2988,21 @@ static bool ath6kl_mgmt_powersave_ap(struct ath6kl_vif *vif,
return false;
}
/*
 * Check whether the probe-response SSID is longer than the P2P wildcard
 * SSID prefix "DIRECT-" (P2P_WILDCARD_SSID_LEN), which indicates a
 * P2P GO frame.
 */
static bool ath6kl_is_p2p_go_ssid(const u8 *buf, size_t len)
{
	const struct ieee80211_mgmt *mgmt;

	mgmt = (const struct ieee80211_mgmt *) buf;

	/*
	 * variable[1] contains the SSID tag length; it must lie strictly
	 * inside the frame before we read it.  The previous '>=' test
	 * allowed a one-byte read past the end of the buffer when
	 * buf + len == &variable[1].
	 */
	if (buf + len > &mgmt->u.probe_resp.variable[1] &&
	    (mgmt->u.probe_resp.variable[1] > P2P_WILDCARD_SSID_LEN)) {
		return true;
	}

	return false;
}
static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
struct ieee80211_channel *chan, bool offchan,
enum nl80211_channel_type channel_type,
......@@ -2763,11 +3017,11 @@ static int ath6kl_mgmt_tx(struct wiphy *wiphy, struct net_device *dev,
bool more_data, queued;
mgmt = (const struct ieee80211_mgmt *) buf;
if (buf + len >= mgmt->u.probe_resp.variable &&
vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
ieee80211_is_probe_resp(mgmt->frame_control)) {
if (vif->nw_type == AP_NETWORK && test_bit(CONNECTED, &vif->flags) &&
ieee80211_is_probe_resp(mgmt->frame_control) &&
ath6kl_is_p2p_go_ssid(buf, len)) {
/*
* Send Probe Response frame in AP mode using a separate WMI
* Send Probe Response frame in GO mode using a separate WMI
* command to allow the target to fill in the generic IEs.
*/
*cookie = 0; /* TX status not supported */
......@@ -2835,6 +3089,8 @@ static int ath6kl_cfg80211_sscan_start(struct wiphy *wiphy,
if (vif->sme_state != SME_DISCONNECTED)
return -EBUSY;
ath6kl_cfg80211_scan_complete_event(vif, true);
for (i = 0; i < ar->wiphy->max_sched_scan_ssids; i++) {
ath6kl_wmi_probedssid_cmd(ar->wmi, vif->fw_vif_idx,
i, DISABLE_SSID_FLAG,
......@@ -3096,6 +3352,7 @@ struct net_device *ath6kl_interface_add(struct ath6kl *ar, char *name,
vif->next_mode = nw_type;
vif->listen_intvl_t = ATH6KL_DEFAULT_LISTEN_INTVAL;
vif->bmiss_time_t = ATH6KL_DEFAULT_BMISS_TIME;
vif->htcap.ht_enable = true;
memcpy(ndev->dev_addr, ar->mac_addr, ETH_ALEN);
if (fw_vif_idx != 0)
......@@ -3183,6 +3440,10 @@ int ath6kl_cfg80211_init(struct ath6kl *ar)
if (test_bit(ATH6KL_FW_CAPABILITY_SCHED_SCAN, ar->fw_capabilities))
ar->wiphy->flags |= WIPHY_FLAG_SUPPORTS_SCHED_SCAN;
if (test_bit(ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
ar->fw_capabilities))
ar->wiphy->features = NL80211_FEATURE_INACTIVITY_TIMER;
ar->wiphy->probe_resp_offload =
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
......
......@@ -22,7 +22,8 @@
#define ATH6KL_MAX_IE 256
extern int ath6kl_printk(const char *level, const char *fmt, ...);
extern __printf(2, 3)
int ath6kl_printk(const char *level, const char *fmt, ...);
/*
* Reflects the version of binary interface exposed by ATH6KL target
......@@ -77,6 +78,7 @@ enum crypto_type {
struct htc_endpoint_credit_dist;
struct ath6kl;
struct ath6kl_htcap;
enum htc_credit_dist_reason;
struct ath6kl_htc_credit_info;
......
......@@ -20,9 +20,11 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/export.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "hif-ops.h"
#include "htc-ops.h"
#include "cfg80211.h"
unsigned int debug_mask;
......@@ -39,12 +41,36 @@ module_param(uart_debug, uint, 0644);
module_param(ath6kl_p2p, uint, 0644);
module_param(testmode, uint, 0644);
int ath6kl_core_init(struct ath6kl *ar)
/* Exported wrapper: forward a completed TX skb to the HTC layer
 * (presumably invoked by HIF modules — confirm against hif drivers). */
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
	ath6kl_htc_tx_complete(ar, skb);
}
EXPORT_SYMBOL(ath6kl_core_tx_complete);
/* Exported wrapper: forward a received skb (with its pipe id) to the
 * HTC layer (presumably invoked by HIF modules — confirm). */
void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe)
{
	ath6kl_htc_rx_complete(ar, skb, pipe);
}
EXPORT_SYMBOL(ath6kl_core_rx_complete);
int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type)
{
struct ath6kl_bmi_target_info targ_info;
struct net_device *ndev;
int ret = 0, i;
switch (htc_type) {
case ATH6KL_HTC_TYPE_MBOX:
ath6kl_htc_mbox_attach(ar);
break;
case ATH6KL_HTC_TYPE_PIPE:
ath6kl_htc_pipe_attach(ar);
break;
default:
WARN_ON(1);
return -ENOMEM;
}
ar->ath6kl_wq = create_singlethread_workqueue("ath6kl");
if (!ar->ath6kl_wq)
return -ENOMEM;
......@@ -280,7 +306,7 @@ void ath6kl_core_cleanup(struct ath6kl *ar)
kfree(ar->fw_board);
kfree(ar->fw_otp);
kfree(ar->fw);
vfree(ar->fw);
kfree(ar->fw_patch);
kfree(ar->fw_testscript);
......
......@@ -91,6 +91,15 @@ enum ath6kl_fw_capability {
*/
ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
/*
* Firmware has support to cleanup inactive stations
* in AP mode.
*/
ATH6KL_FW_CAPABILITY_INACTIVITY_TIMEOUT,
/* Firmware has support to override rsn cap of rsn ie */
ATH6KL_FW_CAPABILITY_RSN_CAP_OVERRIDE,
/* this needs to be last */
ATH6KL_FW_CAPABILITY_MAX,
};
......@@ -205,6 +214,8 @@ struct ath6kl_fw_ie {
#define ATH6KL_CONF_ENABLE_TX_BURST BIT(3)
#define ATH6KL_CONF_UART_DEBUG BIT(4)
#define P2P_WILDCARD_SSID_LEN 7 /* DIRECT- */
enum wlan_low_pwr_state {
WLAN_POWER_STATE_ON,
WLAN_POWER_STATE_CUT_PWR,
......@@ -454,6 +465,11 @@ enum ath6kl_hif_type {
ATH6KL_HIF_TYPE_USB,
};
/* HTC transport implementation to attach in ath6kl_core_init(). */
enum ath6kl_htc_type {
	ATH6KL_HTC_TYPE_MBOX,
	ATH6KL_HTC_TYPE_PIPE,
};
/* Max number of filters that hw supports */
#define ATH6K_MAX_MC_FILTERS_PER_LIST 7
struct ath6kl_mc_filter {
......@@ -461,6 +477,12 @@ struct ath6kl_mc_filter {
char hw_addr[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
};
/* Per-vif HT capability state sent to firmware (see ath6kl_set_htcap). */
struct ath6kl_htcap {
	bool ht_enable;		/* HT on/off; defaults to true at vif creation */
	u8 ampdu_factor;	/* A-MPDU factor, e.g. IEEE80211_HT_MAX_AMPDU_16K */
	unsigned short cap_info;	/* HT cap bits (ath6kl_g_htcap/ath6kl_a_htcap) */
};
/*
* Driver's maximum limit, note that some firmwares support only one vif
* and the runtime (current) limit must be checked from ar->vif_max.
......@@ -509,6 +531,7 @@ struct ath6kl_vif {
struct ath6kl_wep_key wep_key_list[WMI_MAX_KEY_INDEX + 1];
struct ath6kl_key keys[WMI_MAX_KEY_INDEX + 1];
struct aggr_info *aggr_cntxt;
struct ath6kl_htcap htcap;
struct timer_list disconnect_timer;
struct timer_list sched_scan_timer;
......@@ -521,6 +544,8 @@ struct ath6kl_vif {
u32 send_action_id;
bool probe_req_report;
u16 next_chan;
enum nl80211_channel_type next_ch_type;
enum ieee80211_band next_ch_band;
u16 assoc_bss_beacon_int;
u16 listen_intvl_t;
u16 bmiss_time_t;
......@@ -568,6 +593,7 @@ struct ath6kl {
struct ath6kl_bmi bmi;
const struct ath6kl_hif_ops *hif_ops;
const struct ath6kl_htc_ops *htc_ops;
struct wmi *wmi;
int tx_pending[ENDPOINT_MAX];
int total_tx_data_pend;
......@@ -746,7 +772,8 @@ void init_netdev(struct net_device *dev);
void ath6kl_cookie_init(struct ath6kl *ar);
void ath6kl_cookie_cleanup(struct ath6kl *ar);
void ath6kl_rx(struct htc_target *target, struct htc_packet *packet);
void ath6kl_tx_complete(void *context, struct list_head *packet_queue);
void ath6kl_tx_complete(struct htc_target *context,
struct list_head *packet_queue);
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
struct htc_packet *packet);
void ath6kl_stop_txrx(struct ath6kl *ar);
......@@ -821,8 +848,11 @@ int ath6kl_init_hw_params(struct ath6kl *ar);
void ath6kl_check_wow_status(struct ath6kl *ar);
void ath6kl_core_tx_complete(struct ath6kl *ar, struct sk_buff *skb);
void ath6kl_core_rx_complete(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
struct ath6kl *ath6kl_core_create(struct device *dev);
int ath6kl_core_init(struct ath6kl *ar);
int ath6kl_core_init(struct ath6kl *ar, enum ath6kl_htc_type htc_type);
void ath6kl_core_cleanup(struct ath6kl *ar);
void ath6kl_core_destroy(struct ath6kl *ar);
......
......@@ -616,6 +616,12 @@ static ssize_t read_file_tgt_stats(struct file *file, char __user *user_buf,
"Num disconnects", tgt_stats->cs_discon_cnt);
len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
"Beacon avg rssi", tgt_stats->cs_ave_beacon_rssi);
len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
"ARP pkt received", tgt_stats->arp_received);
len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
"ARP pkt matched", tgt_stats->arp_matched);
len += scnprintf(buf + len, buf_len - len, "%20s %10d\n",
"ARP pkt replied", tgt_stats->arp_replied);
if (len > buf_len)
len = buf_len;
......
......@@ -43,6 +43,7 @@ enum ATH6K_DEBUG_MASK {
ATH6KL_DBG_WMI_DUMP = BIT(19),
ATH6KL_DBG_SUSPEND = BIT(20),
ATH6KL_DBG_USB = BIT(21),
ATH6KL_DBG_USB_BULK = BIT(22),
ATH6KL_DBG_ANY = 0xffffffff /* enable all logs */
};
......
......@@ -150,4 +150,38 @@ static inline void ath6kl_hif_stop(struct ath6kl *ar)
ar->hif_ops->stop(ar);
}
/* Forward a send request (optional header skb + data skb) to the HIF
 * driver's pipe_send op. */
static inline int ath6kl_hif_pipe_send(struct ath6kl *ar,
				       u8 pipe, struct sk_buff *hdr_buf,
				       struct sk_buff *buf)
{
	ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe send\n");

	return ar->hif_ops->pipe_send(ar, pipe, hdr_buf, buf);
}
/* Ask the HIF driver for its default uplink/downlink pipe ids. */
static inline void ath6kl_hif_pipe_get_default(struct ath6kl *ar,
					       u8 *ul_pipe, u8 *dl_pipe)
{
	ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get default\n");

	ar->hif_ops->pipe_get_default(ar, ul_pipe, dl_pipe);
}
/* Map an HTC service id to its uplink/downlink pipe ids via the HIF
 * driver's pipe_map_service op. */
static inline int ath6kl_hif_pipe_map_service(struct ath6kl *ar,
					      u16 service_id, u8 *ul_pipe,
					      u8 *dl_pipe)
{
	/* debug text fixed: this was copy-pasted from pipe_get_default */
	ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe map service\n");

	return ar->hif_ops->pipe_map_service(ar, service_id, ul_pipe, dl_pipe);
}
/* Query how many entries are free in the given pipe's TX queue. */
static inline u16 ath6kl_hif_pipe_get_free_queue_number(struct ath6kl *ar,
							u8 pipe)
{
	ath6kl_dbg(ATH6KL_DBG_HIF, "hif pipe get free queue number\n");

	return ar->hif_ops->pipe_get_free_queue_number(ar, pipe);
}
#endif
......@@ -256,6 +256,12 @@ struct ath6kl_hif_ops {
int (*power_on)(struct ath6kl *ar);
int (*power_off)(struct ath6kl *ar);
void (*stop)(struct ath6kl *ar);
int (*pipe_send)(struct ath6kl *ar, u8 pipe, struct sk_buff *hdr_buf,
struct sk_buff *buf);
void (*pipe_get_default)(struct ath6kl *ar, u8 *pipe_ul, u8 *pipe_dl);
int (*pipe_map_service)(struct ath6kl *ar, u16 service_id, u8 *pipe_ul,
u8 *pipe_dl);
u16 (*pipe_get_free_queue_number)(struct ath6kl *ar, u8 pipe);
};
int ath6kl_hif_setup(struct ath6kl_device *dev);
......
/*
* Copyright (c) 2004-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#ifndef HTC_OPS_H
#define HTC_OPS_H
#include "htc.h"
#include "debug.h"
/*
 * Thin dispatch wrappers around the HTC ops vtable (ar->htc_ops /
 * target->dev->ar->htc_ops).  Each wrapper forwards directly to the
 * callback registered by the active HTC back-end, so the rest of the
 * driver is agnostic to which implementation is attached.
 */

/* Instantiate the HTC layer for this device. */
static inline void *ath6kl_htc_create(struct ath6kl *ar)
{
	return ar->htc_ops->create(ar);
}

static inline int ath6kl_htc_wait_target(struct htc_target *target)
{
	return target->dev->ar->htc_ops->wait_target(target);
}

static inline int ath6kl_htc_start(struct htc_target *target)
{
	return target->dev->ar->htc_ops->start(target);
}

/* Connect an HTC service endpoint described by @req; result in @resp. */
static inline int ath6kl_htc_conn_service(struct htc_target *target,
					  struct htc_service_connect_req *req,
					  struct htc_service_connect_resp *resp)
{
	return target->dev->ar->htc_ops->conn_service(target, req, resp);
}

static inline int ath6kl_htc_tx(struct htc_target *target,
				struct htc_packet *packet)
{
	return target->dev->ar->htc_ops->tx(target, packet);
}

static inline void ath6kl_htc_stop(struct htc_target *target)
{
	return target->dev->ar->htc_ops->stop(target);
}

static inline void ath6kl_htc_cleanup(struct htc_target *target)
{
	return target->dev->ar->htc_ops->cleanup(target);
}

/* Flush queued TX packets on @endpoint matching @tag. */
static inline void ath6kl_htc_flush_txep(struct htc_target *target,
					 enum htc_endpoint_id endpoint,
					 u16 tag)
{
	return target->dev->ar->htc_ops->flush_txep(target, endpoint, tag);
}

static inline void ath6kl_htc_flush_rx_buf(struct htc_target *target)
{
	return target->dev->ar->htc_ops->flush_rx_buf(target);
}

static inline void ath6kl_htc_activity_changed(struct htc_target *target,
					       enum htc_endpoint_id endpoint,
					       bool active)
{
	return target->dev->ar->htc_ops->activity_changed(target, endpoint,
							  active);
}

static inline int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
					   enum htc_endpoint_id endpoint)
{
	return target->dev->ar->htc_ops->get_rxbuf_num(target, endpoint);
}

static inline int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
						struct list_head *pktq)
{
	return target->dev->ar->htc_ops->add_rxbuf_multiple(target, pktq);
}

static inline int ath6kl_htc_credit_setup(struct htc_target *target,
					  struct ath6kl_htc_credit_info *info)
{
	return target->dev->ar->htc_ops->credit_setup(target, info);
}

/* skb completion entry points used by ath6kl_core_tx/rx_complete(). */
static inline void ath6kl_htc_tx_complete(struct ath6kl *ar,
					  struct sk_buff *skb)
{
	ar->htc_ops->tx_complete(ar, skb);
}

static inline void ath6kl_htc_rx_complete(struct ath6kl *ar,
					  struct sk_buff *skb, u8 pipe)
{
	ar->htc_ops->rx_complete(ar, skb, pipe);
}
#endif
......@@ -25,6 +25,7 @@
/* send direction */
#define HTC_FLAGS_NEED_CREDIT_UPDATE (1 << 0)
#define HTC_FLAGS_SEND_BUNDLE (1 << 1)
#define HTC_FLAGS_TX_FIXUP_NETBUF (1 << 2)
/* receive direction */
#define HTC_FLG_RX_UNUSED (1 << 0)
......@@ -56,6 +57,10 @@
#define HTC_CONN_FLGS_THRESH_LVL_THREE_QUAT 0x2
#define HTC_CONN_FLGS_REDUCE_CRED_DRIB 0x4
#define HTC_CONN_FLGS_THRESH_MASK 0x3
/* disable credit flow control on a specific service */
#define HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL (1 << 3)
#define HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT 8
#define HTC_CONN_FLGS_SET_RECV_ALLOC_MASK 0xFF00
/* connect response status codes */
#define HTC_SERVICE_SUCCESS 0
......@@ -75,6 +80,7 @@
#define HTC_RECORD_LOOKAHEAD_BUNDLE 3
#define HTC_SETUP_COMP_FLG_RX_BNDL_EN (1 << 0)
#define HTC_SETUP_COMP_FLG_DISABLE_TX_CREDIT_FLOW (1 << 1)
#define MAKE_SERVICE_ID(group, index) \
(int)(((int)group << 8) | (int)(index))
......@@ -109,6 +115,8 @@
/* HTC operational parameters */
#define HTC_TARGET_RESPONSE_TIMEOUT 2000 /* in ms */
#define HTC_TARGET_RESPONSE_POLL_WAIT 10
#define HTC_TARGET_RESPONSE_POLL_COUNT 200
#define HTC_TARGET_DEBUG_INTR_MASK 0x01
#define HTC_TARGET_CREDIT_INTR_MASK 0xF0
......@@ -128,6 +136,7 @@
#define HTC_RECV_WAIT_BUFFERS (1 << 0)
#define HTC_OP_STATE_STOPPING (1 << 0)
#define HTC_OP_STATE_SETUP_COMPLETE (1 << 1)
/*
* The frame header length and message formats defined herein were selected
......@@ -311,6 +320,14 @@ struct htc_packet {
void (*completion) (struct htc_target *, struct htc_packet *);
struct htc_target *context;
/*
 * Optimization for network-oriented data: the HTC packet
 * can carry the network buffer corresponding to the HTC packet,
 * so lower layers may optimize the transfer knowing this is
 * a network buffer.
 */
struct sk_buff *skb;
};
enum htc_send_full_action {
......@@ -319,12 +336,14 @@ enum htc_send_full_action {
};
/* Callbacks a service registers for its HTC endpoint. */
struct htc_ep_callbacks {
	void (*tx_complete) (struct htc_target *, struct htc_packet *);
	void (*rx) (struct htc_target *, struct htc_packet *);
	void (*rx_refill) (struct htc_target *, enum htc_endpoint_id endpoint);
	enum htc_send_full_action (*tx_full) (struct htc_target *,
					      struct htc_packet *);
	struct htc_packet *(*rx_allocthresh) (struct htc_target *,
					      enum htc_endpoint_id, int);
	/* batched TX completion for a whole list of packets */
	void (*tx_comp_multi) (struct htc_target *, struct list_head *);
	int rx_alloc_thresh;
	int rx_refill_thresh;
};
......@@ -502,6 +521,13 @@ struct htc_endpoint {
u32 conn_flags;
struct htc_endpoint_stats ep_st;
u16 tx_drop_packet_threshold;
struct {
u8 pipeid_ul;
u8 pipeid_dl;
struct list_head tx_lookup_queue;
bool tx_credit_flow_enabled;
} pipe;
};
struct htc_control_buffer {
......@@ -509,6 +535,42 @@ struct htc_control_buffer {
u8 *buf;
};
/* Per-service TX credit allocation used by the pipe HTC back-end. */
struct htc_pipe_txcredit_alloc {
	u16 service_id;
	u8 credit_alloc;
};

/* Outcome of attempting to queue a packet for send. */
enum htc_send_queue_result {
	HTC_SEND_QUEUE_OK = 0, /* packet was queued */
	HTC_SEND_QUEUE_DROP = 1, /* this packet should be dropped */
};
/*
 * Virtual operations implemented by each HTC back-end (mbox and pipe).
 * The active implementation is attached via ath6kl_htc_mbox_attach() /
 * ath6kl_htc_pipe_attach() and invoked through the ath6kl_htc_*()
 * inline wrappers in htc-ops.h.
 */
struct ath6kl_htc_ops {
	void* (*create)(struct ath6kl *ar);
	int (*wait_target)(struct htc_target *target);
	int (*start)(struct htc_target *target);
	int (*conn_service)(struct htc_target *target,
			    struct htc_service_connect_req *req,
			    struct htc_service_connect_resp *resp);
	int (*tx)(struct htc_target *target, struct htc_packet *packet);
	void (*stop)(struct htc_target *target);
	void (*cleanup)(struct htc_target *target);
	void (*flush_txep)(struct htc_target *target,
			   enum htc_endpoint_id endpoint, u16 tag);
	void (*flush_rx_buf)(struct htc_target *target);
	void (*activity_changed)(struct htc_target *target,
				 enum htc_endpoint_id endpoint,
				 bool active);
	int (*get_rxbuf_num)(struct htc_target *target,
			     enum htc_endpoint_id endpoint);
	int (*add_rxbuf_multiple)(struct htc_target *target,
				  struct list_head *pktq);
	int (*credit_setup)(struct htc_target *target,
			    struct ath6kl_htc_credit_info *cred_info);
	/* skb completion hooks (see ath6kl_core_tx/rx_complete) */
	int (*tx_complete)(struct ath6kl *ar, struct sk_buff *skb);
	int (*rx_complete)(struct ath6kl *ar, struct sk_buff *skb, u8 pipe);
};
struct ath6kl_device;
/* our HTC target state */
......@@ -557,36 +619,19 @@ struct htc_target {
/* counts the number of Tx without bundling continuously per AC */
u32 ac_tx_count[WMM_NUM_AC];
struct {
struct htc_packet *htc_packet_pool;
u8 ctrl_response_buf[HTC_MAX_CTRL_MSG_LEN];
int ctrl_response_len;
bool ctrl_response_valid;
struct htc_pipe_txcredit_alloc txcredit_alloc[ENDPOINT_MAX];
} pipe;
};
void *ath6kl_htc_create(struct ath6kl *ar);
void ath6kl_htc_set_credit_dist(struct htc_target *target,
struct ath6kl_htc_credit_info *cred_info,
u16 svc_pri_order[], int len);
int ath6kl_htc_wait_target(struct htc_target *target);
int ath6kl_htc_start(struct htc_target *target);
int ath6kl_htc_conn_service(struct htc_target *target,
struct htc_service_connect_req *req,
struct htc_service_connect_resp *resp);
int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet);
void ath6kl_htc_stop(struct htc_target *target);
void ath6kl_htc_cleanup(struct htc_target *target);
void ath6kl_htc_flush_txep(struct htc_target *target,
enum htc_endpoint_id endpoint, u16 tag);
void ath6kl_htc_flush_rx_buf(struct htc_target *target);
void ath6kl_htc_indicate_activity_change(struct htc_target *target,
enum htc_endpoint_id endpoint,
bool active);
int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
enum htc_endpoint_id endpoint);
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
struct list_head *pktq);
int ath6kl_htc_rxmsg_pending_handler(struct htc_target *target,
u32 msg_look_ahead, int *n_pkts);
int ath6kl_credit_setup(void *htc_handle,
struct ath6kl_htc_credit_info *cred_info);
static inline void set_htc_pkt_info(struct htc_packet *packet, void *context,
u8 *buf, unsigned int len,
enum htc_endpoint_id eid, u16 tag)
......@@ -626,4 +671,7 @@ static inline int get_queue_depth(struct list_head *queue)
return depth;
}
void ath6kl_htc_pipe_attach(struct ath6kl *ar);
void ath6kl_htc_mbox_attach(struct ath6kl *ar);
#endif
......@@ -23,6 +23,14 @@
#define CALC_TXRX_PADDED_LEN(dev, len) (__ALIGN_MASK((len), (dev)->block_mask))
static void ath6kl_htc_mbox_cleanup(struct htc_target *target);
static void ath6kl_htc_mbox_stop(struct htc_target *target);
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
struct list_head *pkt_queue);
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
struct ath6kl_htc_credit_info *cred_info,
u16 svc_pri_order[], int len);
/* threshold to re-enable Tx bundling for an AC*/
#define TX_RESUME_BUNDLE_THRESHOLD 1500
......@@ -130,8 +138,8 @@ static void ath6kl_credit_init(struct ath6kl_htc_credit_info *cred_info,
}
/* initialize and setup credit distribution */
int ath6kl_credit_setup(void *htc_handle,
struct ath6kl_htc_credit_info *cred_info)
static int ath6kl_htc_mbox_credit_setup(struct htc_target *htc_target,
struct ath6kl_htc_credit_info *cred_info)
{
u16 servicepriority[5];
......@@ -144,7 +152,7 @@ int ath6kl_credit_setup(void *htc_handle,
servicepriority[4] = WMI_DATA_BK_SVC; /* lowest */
/* set priority list */
ath6kl_htc_set_credit_dist(htc_handle, cred_info, servicepriority, 5);
ath6kl_htc_set_credit_dist(htc_target, cred_info, servicepriority, 5);
return 0;
}
......@@ -432,7 +440,7 @@ static void htc_tx_complete(struct htc_endpoint *endpoint,
"htc tx complete ep %d pkts %d\n",
endpoint->eid, get_queue_depth(txq));
ath6kl_tx_complete(endpoint->target->dev->ar, txq);
ath6kl_tx_complete(endpoint->target, txq);
}
static void htc_tx_comp_handler(struct htc_target *target,
......@@ -1065,7 +1073,7 @@ static int htc_setup_tx_complete(struct htc_target *target)
return status;
}
void ath6kl_htc_set_credit_dist(struct htc_target *target,
static void ath6kl_htc_set_credit_dist(struct htc_target *target,
struct ath6kl_htc_credit_info *credit_info,
u16 srvc_pri_order[], int list_len)
{
......@@ -1093,7 +1101,8 @@ void ath6kl_htc_set_credit_dist(struct htc_target *target,
}
}
int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
static int ath6kl_htc_mbox_tx(struct htc_target *target,
struct htc_packet *packet)
{
struct htc_endpoint *endpoint;
struct list_head queue;
......@@ -1121,7 +1130,7 @@ int ath6kl_htc_tx(struct htc_target *target, struct htc_packet *packet)
}
/* flush endpoint TX queue */
void ath6kl_htc_flush_txep(struct htc_target *target,
static void ath6kl_htc_mbox_flush_txep(struct htc_target *target,
enum htc_endpoint_id eid, u16 tag)
{
struct htc_packet *packet, *tmp_pkt;
......@@ -1173,12 +1182,13 @@ static void ath6kl_htc_flush_txep_all(struct htc_target *target)
if (endpoint->svc_id == 0)
/* not in use.. */
continue;
ath6kl_htc_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
ath6kl_htc_mbox_flush_txep(target, i, HTC_TX_PACKET_TAG_ALL);
}
}
void ath6kl_htc_indicate_activity_change(struct htc_target *target,
enum htc_endpoint_id eid, bool active)
static void ath6kl_htc_mbox_activity_changed(struct htc_target *target,
enum htc_endpoint_id eid,
bool active)
{
struct htc_endpoint *endpoint = &target->endpoint[eid];
bool dist = false;
......@@ -1246,7 +1256,7 @@ static int htc_add_rxbuf(struct htc_target *target, struct htc_packet *packet)
INIT_LIST_HEAD(&queue);
list_add_tail(&packet->list, &queue);
return ath6kl_htc_add_rxbuf_multiple(target, &queue);
return ath6kl_htc_mbox_add_rxbuf_multiple(target, &queue);
}
static void htc_reclaim_rxbuf(struct htc_target *target,
......@@ -1353,7 +1363,9 @@ static int ath6kl_htc_rx_setup(struct htc_target *target,
sizeof(*htc_hdr));
if (!htc_valid_rx_frame_len(target, ep->eid, full_len)) {
ath6kl_warn("Rx buffer requested with invalid length\n");
ath6kl_warn("Rx buffer requested with invalid length htc_hdr:eid %d, flags 0x%x, len %d\n",
htc_hdr->eid, htc_hdr->flags,
le16_to_cpu(htc_hdr->payld_len));
return -EINVAL;
}
......@@ -2288,7 +2300,7 @@ static struct htc_packet *htc_wait_for_ctrl_msg(struct htc_target *target)
return NULL;
}
int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
static int ath6kl_htc_mbox_add_rxbuf_multiple(struct htc_target *target,
struct list_head *pkt_queue)
{
struct htc_endpoint *endpoint;
......@@ -2350,7 +2362,7 @@ int ath6kl_htc_add_rxbuf_multiple(struct htc_target *target,
return status;
}
void ath6kl_htc_flush_rx_buf(struct htc_target *target)
static void ath6kl_htc_mbox_flush_rx_buf(struct htc_target *target)
{
struct htc_endpoint *endpoint;
struct htc_packet *packet, *tmp_pkt;
......@@ -2392,7 +2404,7 @@ void ath6kl_htc_flush_rx_buf(struct htc_target *target)
}
}
int ath6kl_htc_conn_service(struct htc_target *target,
static int ath6kl_htc_mbox_conn_service(struct htc_target *target,
struct htc_service_connect_req *conn_req,
struct htc_service_connect_resp *conn_resp)
{
......@@ -2564,7 +2576,7 @@ static void reset_ep_state(struct htc_target *target)
INIT_LIST_HEAD(&target->cred_dist_list);
}
int ath6kl_htc_get_rxbuf_num(struct htc_target *target,
static int ath6kl_htc_mbox_get_rxbuf_num(struct htc_target *target,
enum htc_endpoint_id endpoint)
{
int num;
......@@ -2624,7 +2636,7 @@ static void htc_setup_msg_bndl(struct htc_target *target)
}
}
int ath6kl_htc_wait_target(struct htc_target *target)
static int ath6kl_htc_mbox_wait_target(struct htc_target *target)
{
struct htc_packet *packet = NULL;
struct htc_ready_ext_msg *rdy_msg;
......@@ -2693,12 +2705,12 @@ int ath6kl_htc_wait_target(struct htc_target *target)
connect.svc_id = HTC_CTRL_RSVD_SVC;
/* connect fake service */
status = ath6kl_htc_conn_service((void *)target, &connect, &resp);
status = ath6kl_htc_mbox_conn_service((void *)target, &connect, &resp);
if (status)
/*
* FIXME: this call doesn't make sense, the caller should
* call ath6kl_htc_cleanup() when it wants remove htc
* call ath6kl_htc_mbox_cleanup() when it wants remove htc
*/
ath6kl_hif_cleanup_scatter(target->dev->ar);
......@@ -2715,7 +2727,7 @@ int ath6kl_htc_wait_target(struct htc_target *target)
* Start HTC, enable interrupts and let the target know
* host has finished setup.
*/
int ath6kl_htc_start(struct htc_target *target)
static int ath6kl_htc_mbox_start(struct htc_target *target)
{
struct htc_packet *packet;
int status;
......@@ -2752,7 +2764,7 @@ int ath6kl_htc_start(struct htc_target *target)
status = ath6kl_hif_unmask_intrs(target->dev);
if (status)
ath6kl_htc_stop(target);
ath6kl_htc_mbox_stop(target);
return status;
}
......@@ -2796,7 +2808,7 @@ static int ath6kl_htc_reset(struct htc_target *target)
}
/* htc_stop: stop interrupt reception, and flush all queued buffers */
void ath6kl_htc_stop(struct htc_target *target)
static void ath6kl_htc_mbox_stop(struct htc_target *target)
{
spin_lock_bh(&target->htc_lock);
target->htc_flags |= HTC_OP_STATE_STOPPING;
......@@ -2811,12 +2823,12 @@ void ath6kl_htc_stop(struct htc_target *target)
ath6kl_htc_flush_txep_all(target);
ath6kl_htc_flush_rx_buf(target);
ath6kl_htc_mbox_flush_rx_buf(target);
ath6kl_htc_reset(target);
}
void *ath6kl_htc_create(struct ath6kl *ar)
static void *ath6kl_htc_mbox_create(struct ath6kl *ar)
{
struct htc_target *target = NULL;
int status = 0;
......@@ -2857,13 +2869,13 @@ void *ath6kl_htc_create(struct ath6kl *ar)
return target;
err_htc_cleanup:
ath6kl_htc_cleanup(target);
ath6kl_htc_mbox_cleanup(target);
return NULL;
}
/* cleanup the HTC instance */
void ath6kl_htc_cleanup(struct htc_target *target)
static void ath6kl_htc_mbox_cleanup(struct htc_target *target)
{
struct htc_packet *packet, *tmp_packet;
......@@ -2888,3 +2900,24 @@ void ath6kl_htc_cleanup(struct htc_target *target)
kfree(target->dev);
kfree(target);
}
static const struct ath6kl_htc_ops ath6kl_htc_mbox_ops = {
.create = ath6kl_htc_mbox_create,
.wait_target = ath6kl_htc_mbox_wait_target,
.start = ath6kl_htc_mbox_start,
.conn_service = ath6kl_htc_mbox_conn_service,
.tx = ath6kl_htc_mbox_tx,
.stop = ath6kl_htc_mbox_stop,
.cleanup = ath6kl_htc_mbox_cleanup,
.flush_txep = ath6kl_htc_mbox_flush_txep,
.flush_rx_buf = ath6kl_htc_mbox_flush_rx_buf,
.activity_changed = ath6kl_htc_mbox_activity_changed,
.get_rxbuf_num = ath6kl_htc_mbox_get_rxbuf_num,
.add_rxbuf_multiple = ath6kl_htc_mbox_add_rxbuf_multiple,
.credit_setup = ath6kl_htc_mbox_credit_setup,
};
void ath6kl_htc_mbox_attach(struct ath6kl *ar)
{
ar->htc_ops = &ath6kl_htc_mbox_ops;
}
/*
* Copyright (c) 2007-2011 Atheros Communications Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include "core.h"
#include "debug.h"
#include "hif-ops.h"
#define HTC_PACKET_CONTAINER_ALLOCATION 32
#define HTC_CONTROL_BUFFER_SIZE (HTC_MAX_CTRL_MSG_LEN + HTC_HDR_LENGTH)
static int ath6kl_htc_pipe_tx(struct htc_target *handle,
struct htc_packet *packet);
static void ath6kl_htc_pipe_cleanup(struct htc_target *handle);
/* htc pipe tx path */
/* Undo the HTC frame-header fix-up applied to a netbuf before transmit. */
static inline void restore_tx_packet(struct htc_packet *packet)
{
	if (!(packet->info.tx.flags & HTC_FLAGS_TX_FIXUP_NETBUF))
		return;

	/* strip the HTC header that htc_issue_packets() pushed */
	skb_pull(packet->skb, sizeof(struct htc_frame_hdr));
	packet->info.tx.flags &= ~HTC_FLAGS_TX_FIXUP_NETBUF;
}
/*
 * Hand a queue of completed TX packets back to the endpoint owner,
 * either in one shot through the multi-packet callback or one packet
 * at a time through the legacy completion callback.
 */
static void do_send_completion(struct htc_endpoint *ep,
			       struct list_head *queue_to_indicate)
{
	struct htc_packet *packet;

	if (list_empty(queue_to_indicate))
		return;		/* nothing to indicate */

	if (ep->ep_cb.tx_comp_multi != NULL) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: calling ep %d, send complete multiple callback (%d pkts)\n",
			   __func__, ep->eid,
			   get_queue_depth(queue_to_indicate));

		/*
		 * a multiple send complete handler is being used,
		 * pass the queue to the handler
		 */
		ep->ep_cb.tx_comp_multi(ep->target, queue_to_indicate);

		/* callback now owns every packet; reset queue to be safe */
		INIT_LIST_HEAD(queue_to_indicate);
		return;
	}

	/* using legacy EpTxComplete, one packet per call */
	while (!list_empty(queue_to_indicate)) {
		packet = list_first_entry(queue_to_indicate,
					  struct htc_packet, list);
		list_del(&packet->list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: calling ep %d send complete callback on packet 0x%p\n",
			   __func__, ep->eid, packet);

		ep->ep_cb.tx_complete(ep->target, packet);
	}
}
static void send_packet_completion(struct htc_target *target,
struct htc_packet *packet)
{
struct htc_endpoint *ep = &target->endpoint[packet->endpoint];
struct list_head container;
restore_tx_packet(packet);
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
/* do completion */
do_send_completion(ep, &container);
}
/*
 * Dequeue packets from @ep's TX queue into @queue, charging TX credits
 * for each packet.  Stops as soon as the endpoint lacks the credits for
 * the head packet.  ENDPOINT_0 is exempt from credit accounting.
 */
static void get_htc_packet_credit_based(struct htc_target *target,
					struct htc_endpoint *ep,
					struct list_head *queue)
{
	int credits_required;
	int remainder;
	u8 send_flags;
	struct htc_packet *packet;
	unsigned int transfer_len;

	/* NOTE : the TX lock is held when this function is called */

	/* loop until we can grab as many packets out of the queue as we can */
	while (true) {
		send_flags = 0;
		if (list_empty(&ep->txq))
			break;

		/* get packet at head, but don't remove it */
		packet = list_first_entry(&ep->txq, struct htc_packet, list);
		if (packet == NULL)
			break;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: got head packet:0x%p , queue depth: %d\n",
			   __func__, packet, get_queue_depth(&ep->txq));

		/* the full transfer includes the HTC frame header */
		transfer_len = packet->act_len + HTC_HDR_LENGTH;

		if (transfer_len <= target->tgt_cred_sz) {
			credits_required = 1;
		} else {
			/* figure out how many credits this message requires */
			credits_required = transfer_len / target->tgt_cred_sz;
			remainder = transfer_len % target->tgt_cred_sz;

			if (remainder)
				credits_required++;
		}

		ath6kl_dbg(ATH6KL_DBG_HTC, "%s: creds required:%d got:%d\n",
			   __func__, credits_required, ep->cred_dist.credits);

		if (ep->eid == ENDPOINT_0) {
			/*
			 * endpoint 0 is special, it always has a credit and
			 * does not require credit based flow control
			 */
			credits_required = 0;
		} else {
			/* not enough credits for the head packet: stop here */
			if (ep->cred_dist.credits < credits_required)
				break;

			ep->cred_dist.credits -= credits_required;
			ep->ep_st.cred_cosumd += credits_required;

			/* check if we need credits back from the target */
			if (ep->cred_dist.credits <
			    ep->cred_dist.cred_per_msg) {
				/* tell the target we need credits ASAP! */
				send_flags |= HTC_FLAGS_NEED_CREDIT_UPDATE;

				ep->ep_st.cred_low_indicate += 1;
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: host needs credits\n",
					   __func__);
			}
		}

		/* now we can fully dequeue */
		packet = list_first_entry(&ep->txq, struct htc_packet, list);

		list_del(&packet->list);

		/* save the number of credits this packet consumed */
		packet->info.tx.cred_used = credits_required;

		/* save send flags */
		packet->info.tx.flags = send_flags;
		packet->info.tx.seqno = ep->seqno;
		ep->seqno++;

		/* queue this packet into the caller's queue */
		list_add_tail(&packet->list, queue);
	}
}
/*
 * Move up to @resources packets from @ep's TX queue into @queue.
 * Used on the non-credit path; no credit accounting is done here.
 */
static void get_htc_packet(struct htc_target *target,
			   struct htc_endpoint *ep,
			   struct list_head *queue, int resources)
{
	struct htc_packet *pkt;

	/* NOTE : the TX lock is held when this function is called */

	/* drain packets while HIF transmit resources remain */
	while (resources) {
		if (list_empty(&ep->txq))
			break;

		pkt = list_first_entry(&ep->txq, struct htc_packet, list);
		list_del(&pkt->list);

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: got packet:0x%p , new queue depth: %d\n",
			   __func__, pkt, get_queue_depth(&ep->txq));

		/* no credits consumed on this path */
		pkt->info.tx.seqno = ep->seqno++;
		pkt->info.tx.flags = 0;
		pkt->info.tx.cred_used = 0;

		/* queue this packet into the caller's queue */
		list_add_tail(&pkt->list, queue);
		resources--;
	}
}
/*
 * Prepend an HTC frame header to each packet in @pkt_queue and hand the
 * skbs to the HIF pipe layer.  Each packet is added to the endpoint's
 * TX lookup queue before sending so the completion path can match the
 * skb back to its htc_packet.  On a send failure the packet is removed
 * from the lookup queue, its credits are reclaimed, it is pushed back
 * onto @pkt_queue, and every remaining packet is completed with the
 * error status.
 *
 * Returns 0 on success or a negative errno from the first failure.
 */
static int htc_issue_packets(struct htc_target *target,
			     struct htc_endpoint *ep,
			     struct list_head *pkt_queue)
{
	int status = 0;
	u16 payload_len;
	struct sk_buff *skb;
	struct htc_frame_hdr *htc_hdr;
	struct htc_packet *packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "%s: queue: 0x%p, pkts %d\n", __func__,
		   pkt_queue, get_queue_depth(pkt_queue));

	while (!list_empty(pkt_queue)) {
		packet = list_first_entry(pkt_queue, struct htc_packet, list);
		list_del(&packet->list);

		skb = packet->skb;
		if (!skb) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			break;
		}

		payload_len = packet->act_len;

		/* setup HTC frame header */
		htc_hdr = (struct htc_frame_hdr *) skb_push(skb,
							    sizeof(*htc_hdr));
		if (!htc_hdr) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			break;
		}

		/* remember to strip the header again on completion/flush */
		packet->info.tx.flags |= HTC_FLAGS_TX_FIXUP_NETBUF;

		/* Endianess? */
		put_unaligned((u16) payload_len, &htc_hdr->payld_len);
		htc_hdr->flags = packet->info.tx.flags;
		htc_hdr->eid = (u8) packet->endpoint;
		htc_hdr->ctrl[0] = 0;
		htc_hdr->ctrl[1] = (u8) packet->info.tx.seqno;

		spin_lock_bh(&target->tx_lock);

		/* store in look up queue to match completions */
		list_add_tail(&packet->list, &ep->pipe.tx_lookup_queue);
		ep->ep_st.tx_issued += 1;
		spin_unlock_bh(&target->tx_lock);

		/* lock must be dropped: the HIF send may sleep/complete */
		status = ath6kl_hif_pipe_send(target->dev->ar,
					      ep->pipe.pipeid_ul, NULL, skb);

		if (status != 0) {
			if (status != -ENOMEM) {
				/* TODO: if more than 1 endpoint maps to the
				 * same PipeID, it is possible to run out of
				 * resources in the HIF layer.
				 * Don't emit the error
				 */
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: failed status:%d\n",
					   __func__, status);
			}
			spin_lock_bh(&target->tx_lock);
			list_del(&packet->list);

			/* reclaim credits */
			ep->cred_dist.credits += packet->info.tx.cred_used;
			spin_unlock_bh(&target->tx_lock);

			/* put it back into the callers queue */
			list_add(&packet->list, pkt_queue);
			break;
		}
	}

	if (status != 0) {
		/* fail everything that did not make it onto the wire */
		while (!list_empty(pkt_queue)) {
			if (status != -ENOMEM) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: failed pkt:0x%p status:%d\n",
					   __func__, packet, status);
			}

			packet = list_first_entry(pkt_queue,
						  struct htc_packet, list);
			list_del(&packet->list);
			packet->status = status;
			send_packet_completion(target, packet);
		}
	}

	return status;
}
/*
 * Queue the caller's packets (or, with @txq == NULL, just re-examine the
 * endpoint's own backlog) for transmission on @ep.
 *
 * Overflow beyond ep->max_txq_depth is either absorbed wholesale or, if
 * the endpoint registered a tx_full callback, indicated packet by packet
 * so the callback may drop or keep each one.  The actual drain loop is
 * serialized per endpoint via ep->tx_proc_cnt so only one context issues
 * packets at a time.
 *
 * Returns HTC_SEND_QUEUE_OK on success, HTC_SEND_QUEUE_DROP when the
 * packets could not be queued (caller cleans up whatever remains in
 * @txq).
 */
static enum htc_send_queue_result htc_try_send(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct list_head *txq)
{
	struct list_head send_queue;	/* temp queue to hold packets */
	struct htc_packet *packet, *tmp_pkt;
	struct ath6kl *ar = target->dev->ar;
	enum htc_send_full_action action;
	int tx_resources, overflow, txqueue_depth, i, good_pkts;
	u8 pipeid;

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: (queue:0x%p depth:%d)\n",
		   __func__, txq,
		   (txq == NULL) ? 0 : get_queue_depth(txq));

	/* init the local send queue */
	INIT_LIST_HEAD(&send_queue);

	/*
	 * txq equals to NULL means
	 * caller didn't provide a queue, just wants us to
	 * check queues and send
	 */
	if (txq != NULL) {
		if (list_empty(txq)) {
			/* empty queue */
			return HTC_SEND_QUEUE_DROP;
		}

		spin_lock_bh(&target->tx_lock);
		txqueue_depth = get_queue_depth(&ep->txq);
		spin_unlock_bh(&target->tx_lock);

		if (txqueue_depth >= ep->max_txq_depth) {
			/* we've already overflowed */
			overflow = get_queue_depth(txq);
		} else {
			/* get how much we will overflow by */
			overflow = txqueue_depth;
			overflow += get_queue_depth(txq);
			/* get how much we will overflow the TX queue by */
			overflow -= ep->max_txq_depth;
		}

		/* if overflow is negative or zero, we are okay */
		if (overflow > 0) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "%s: Endpoint %d, TX queue will overflow :%d, Tx Depth:%d, Max:%d\n",
				   __func__, ep->eid, overflow, txqueue_depth,
				   ep->max_txq_depth);
		}
		if ((overflow <= 0) ||
		    (ep->ep_cb.tx_full == NULL)) {
			/*
			 * all packets will fit or caller did not provide send
			 * full indication handler -- just move all of them
			 * to the local send_queue object
			 */
			list_splice_tail_init(txq, &send_queue);
		} else {
			good_pkts = get_queue_depth(txq) - overflow;
			if (good_pkts < 0) {
				WARN_ON_ONCE(1);
				return HTC_SEND_QUEUE_DROP;
			}

			/* we have overflowed, and a callback is provided */
			/* dequeue all non-overflow packets to the sendqueue */
			for (i = 0; i < good_pkts; i++) {
				/* pop off caller's queue */
				packet = list_first_entry(txq,
							  struct htc_packet,
							  list);
				list_del(&packet->list);
				/* insert into local queue */
				list_add_tail(&packet->list, &send_queue);
			}

			/*
			 * the caller's queue has all the packets that won't fit
			 * walk through the caller's queue and indicate each to
			 * the send full handler
			 */
			list_for_each_entry_safe(packet, tmp_pkt,
						 txq, list) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "%s: Indicat overflowed TX pkts: %p\n",
					   __func__, packet);
				action = ep->ep_cb.tx_full(ep->target, packet);
				if (action == HTC_SEND_FULL_DROP) {
					/* callback wants the packet dropped */
					ep->ep_st.tx_dropped += 1;

					/* leave this one in the caller's queue
					 * for cleanup */
				} else {
					/* callback wants to keep this packet,
					 * remove from caller's queue */
					list_del(&packet->list);
					/* put it in the send queue */
					list_add_tail(&packet->list,
						      &send_queue);
				}
			}

			if (list_empty(&send_queue)) {
				/* no packets made it in, caller will cleanup */
				return HTC_SEND_QUEUE_DROP;
			}
		}
	}

	/* without credit flow, HIF free-queue space gates transmission */
	if (!ep->pipe.tx_credit_flow_enabled) {
		tx_resources =
		    ath6kl_hif_pipe_get_free_queue_number(ar,
							  ep->pipe.pipeid_ul);
	} else {
		tx_resources = 0;
	}

	spin_lock_bh(&target->tx_lock);
	if (!list_empty(&send_queue)) {
		/* transfer packets to tail */
		list_splice_tail_init(&send_queue, &ep->txq);
		if (!list_empty(&send_queue)) {
			WARN_ON_ONCE(1);
			spin_unlock_bh(&target->tx_lock);
			return HTC_SEND_QUEUE_DROP;
		}
		INIT_LIST_HEAD(&send_queue);
	}

	/* increment tx processing count on entry */
	ep->tx_proc_cnt++;

	if (ep->tx_proc_cnt > 1) {
		/*
		 * Another thread or task is draining the TX queues on this
		 * endpoint that thread will reset the tx processing count
		 * when the queue is drained.
		 */
		ep->tx_proc_cnt--;
		spin_unlock_bh(&target->tx_lock);
		return HTC_SEND_QUEUE_OK;
	}

	/***** beyond this point only 1 thread may enter ******/

	/*
	 * Now drain the endpoint TX queue for transmission as long as we have
	 * enough transmit resources.
	 */
	while (true) {
		if (get_queue_depth(&ep->txq) == 0)
			break;

		if (ep->pipe.tx_credit_flow_enabled) {
			/*
			 * Credit based mechanism provides flow control
			 * based on target transmit resource availability,
			 * we assume that the HIF layer will always have
			 * bus resources greater than target transmit
			 * resources.
			 */
			get_htc_packet_credit_based(target, ep, &send_queue);
		} else {
			/*
			 * Get all packets for this endpoint that we can
			 * for this pass.
			 */
			get_htc_packet(target, ep, &send_queue, tx_resources);
		}

		if (get_queue_depth(&send_queue) == 0) {
			/*
			 * Didn't get packets due to out of resources or TX
			 * queue was drained.
			 */
			break;
		}

		/* drop the lock across the (possibly sleeping) HIF send */
		spin_unlock_bh(&target->tx_lock);

		/* send what we can */
		htc_issue_packets(target, ep, &send_queue);

		if (!ep->pipe.tx_credit_flow_enabled) {
			pipeid = ep->pipe.pipeid_ul;
			tx_resources =
			    ath6kl_hif_pipe_get_free_queue_number(ar, pipeid);
		}

		spin_lock_bh(&target->tx_lock);
	}

	/* done with this endpoint, we can clear the count */
	ep->tx_proc_cnt = 0;
	spin_unlock_bh(&target->tx_lock);

	return HTC_SEND_QUEUE_OK;
}
/* htc control packet manipulation */
/* Free a TX control packet and its backing skb (if any). */
static void destroy_htc_txctrl_packet(struct htc_packet *packet)
{
	if (packet->skb != NULL)
		dev_kfree_skb(packet->skb);

	kfree(packet);
}
/*
 * Allocate a TX control packet with an skb sized for the largest
 * control message plus the HTC header.  Returns NULL on allocation
 * failure.
 */
static struct htc_packet *build_htc_txctrl_packet(void)
{
	struct htc_packet *packet;
	struct sk_buff *skb;

	packet = kzalloc(sizeof(*packet), GFP_KERNEL);
	if (!packet)
		return NULL;

	skb = __dev_alloc_skb(HTC_CONTROL_BUFFER_SIZE, GFP_KERNEL);
	if (!skb) {
		kfree(packet);
		return NULL;
	}

	packet->skb = skb;
	return packet;
}
/* Control packets are not pooled; just tear the packet down. */
static void htc_free_txctrl_packet(struct htc_target *target,
				   struct htc_packet *packet)
{
	destroy_htc_txctrl_packet(packet);
}
/* Allocate a fresh TX control packet; @target is unused here. */
static struct htc_packet *htc_alloc_txctrl_packet(struct htc_target *target)
{
	return build_htc_txctrl_packet();
}
/* TX completion for control packets: simply release the packet. */
static void htc_txctrl_complete(struct htc_target *target,
				struct htc_packet *packet)
{
	htc_free_txctrl_packet(target, packet);
}
#define MAX_MESSAGE_SIZE 1536
/*
 * Partition the target's TX credits among the WMI data/control services
 * and record the split in target->pipe.txcredit_alloc[].  Best effort
 * (WMI_DATA_BE_SVC) receives whatever is left over.
 *
 * Returns 0 when every service received an allocation, -ENOMEM when the
 * credit pool ran out part-way through.
 */
static int htc_setup_target_buffer_assignments(struct htc_target *target)
{
	int status, credits, credit_per_maxmsg, i;
	struct htc_pipe_txcredit_alloc *entry;
	unsigned int hif_usbaudioclass = 0;

	/* credits needed for one maximum-sized (MAX_MESSAGE_SIZE) message */
	credit_per_maxmsg = MAX_MESSAGE_SIZE / target->tgt_cred_sz;
	if (MAX_MESSAGE_SIZE % target->tgt_cred_sz)
		credit_per_maxmsg++;

	/* TODO, this should be configured by the caller! */

	credits = target->tgt_creds;
	entry = &target->pipe.txcredit_alloc[0];

	status = -ENOMEM;

	/* FIXME: hif_usbaudioclass is always zero */

	if (hif_usbaudioclass) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "%s: For USB Audio Class- Total:%d\n",
			   __func__, credits);
		/* NOTE(review): the double entry++ leaves the first two table
		 * slots with service_id 0 - presumably intentional, confirm */
		entry++;
		entry++;
		/* Setup VO Service To have Max Credits */
		entry->service_id = WMI_DATA_VO_SVC;
		entry->credit_alloc = (credits - 6);
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	} else {
		/* first slot intentionally skipped (service_id stays 0) */
		entry++;
		entry->service_id = WMI_DATA_VI_SVC;
		/* video gets a quarter of the pool, minimum one credit */
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_DATA_VO_SVC;
		/* voice gets a quarter of the pool, minimum one credit */
		entry->credit_alloc = credits / 4;
		if (entry->credit_alloc == 0)
			entry->credit_alloc++;

		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_CONTROL_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		entry++;
		entry->service_id = WMI_DATA_BK_SVC;
		entry->credit_alloc = credit_per_maxmsg;
		credits -= (int) entry->credit_alloc;
		if (credits <= 0)
			return status;

		/* leftovers go to best effort */
		entry++;
		entry->service_id = WMI_DATA_BE_SVC;
		entry->credit_alloc = (u8) credits;
		status = 0;
	}

	if (status == 0) {
		/* dump the final allocation table */
		for (i = 0; i < ENDPOINT_MAX; i++) {
			if (target->pipe.txcredit_alloc[i].service_id != 0) {
				ath6kl_dbg(ATH6KL_DBG_HTC,
					   "HTC Service Index : %d TX : 0x%2.2X : alloc:%d\n",
					   i,
					   target->pipe.txcredit_alloc[i].
					   service_id,
					   target->pipe.txcredit_alloc[i].
					   credit_alloc);
			}
		}
	}
	return status;
}
/* process credit reports and call distribution function */
static void htc_process_credit_report(struct htc_target *target,
struct htc_credit_report *rpt,
int num_entries,
enum htc_endpoint_id from_ep)
{
int total_credits = 0, i;
struct htc_endpoint *ep;
/* lock out TX while we update credits */
spin_lock_bh(&target->tx_lock);
for (i = 0; i < num_entries; i++, rpt++) {
if (rpt->eid >= ENDPOINT_MAX) {
WARN_ON_ONCE(1);
spin_unlock_bh(&target->tx_lock);
return;
}
ep = &target->endpoint[rpt->eid];
ep->cred_dist.credits += rpt->credits;
if (ep->cred_dist.credits && get_queue_depth(&ep->txq)) {
spin_unlock_bh(&target->tx_lock);
htc_try_send(target, ep, NULL);
spin_lock_bh(&target->tx_lock);
}
total_credits += rpt->credits;
}
ath6kl_dbg(ATH6KL_DBG_HTC,
"Report indicated %d credits to distribute\n",
total_credits);
spin_unlock_bh(&target->tx_lock);
}
/* flush endpoint TX queue */
/*
 * Complete every packet still queued on @ep with status 0.
 * NOTE(review): the @tag argument is accepted but not used to filter
 * which packets are flushed - all queued packets go; confirm intended.
 */
static void htc_flush_tx_endpoint(struct htc_target *target,
				  struct htc_endpoint *ep, u16 tag)
{
	struct htc_packet *pkt;

	spin_lock_bh(&target->tx_lock);
	while (get_queue_depth(&ep->txq)) {
		pkt = list_first_entry(&ep->txq, struct htc_packet, list);
		list_del(&pkt->list);
		pkt->status = 0;
		send_packet_completion(target, pkt);
	}
	spin_unlock_bh(&target->tx_lock);
}
/*
 * In the adapted HIF layer, struct sk_buff * are passed between HIF and HTC;
 * since upper layers expect struct htc_packet containers, we take the
 * completed skb and look up its corresponding HTC packet buffer in a lookup
 * list. This is extra overhead that could be removed by re-aligning the HIF
 * interfaces with HTC.
 */
static struct htc_packet *htc_lookup_tx_packet(struct htc_target *target,
					       struct htc_endpoint *ep,
					       struct sk_buff *skb)
{
	struct htc_packet *entry, *tmp, *found = NULL;

	spin_lock_bh(&target->tx_lock);

	/*
	 * Iterate from the front of the TX lookup queue; the lower layer
	 * completes in order, so the match is usually at the head.
	 */
	list_for_each_entry_safe(entry, tmp, &ep->pipe.tx_lookup_queue,
				 list) {
		if (entry->skb == skb) {
			/* found it - detach from the lookup queue */
			list_del(&entry->list);
			found = entry;
			break;
		}
	}

	spin_unlock_bh(&target->tx_lock);

	return found;
}
/*
 * HIF-layer completion for a transmitted skb: map it back to its HTC
 * packet via the endpoint's TX lookup queue, complete that packet, and
 * restart transmission on endpoints that do not use credit based flow
 * control.  Always returns 0.
 *
 * Cleanup vs. original: dropped the dead `skb = NULL` store and the
 * unused netdata/netlen locals.
 */
static int ath6kl_htc_pipe_tx_complete(struct ath6kl *ar, struct sk_buff *skb)
{
	struct htc_target *target = ar->htc_target;
	struct htc_frame_hdr *htc_hdr;
	struct htc_endpoint *ep;
	struct htc_packet *packet;
	u8 ep_id;

	/* the HTC header was prepended by htc_issue_packets() */
	htc_hdr = (struct htc_frame_hdr *) skb->data;
	ep_id = htc_hdr->eid;
	/* NOTE(review): ep_id is not range-checked against ENDPOINT_MAX;
	 * this relies on the header we wrote at TX time being intact */
	ep = &target->endpoint[ep_id];

	packet = htc_lookup_tx_packet(target, ep, skb);
	if (packet == NULL) {
		/* may have already been flushed and freed */
		ath6kl_err("HTC TX lookup failed!\n");
	} else {
		/* will be giving this buffer back to upper layers */
		packet->status = 0;
		send_packet_completion(target, packet);
	}

	if (!ep->pipe.tx_credit_flow_enabled) {
		/*
		 * note: when using TX credit flow, the re-checking of queues
		 * happens when credits flow back from the target. in the
		 * non-TX credit case, we recheck after the packet completes
		 */
		htc_try_send(target, ep, NULL);
	}

	return 0;
}
/*
 * Submit a queue of packets for transmission.  All packets must target
 * the same endpoint, which is taken from the first entry.  Packets that
 * could not be queued are completed with -ENOMEM.
 *
 * Returns 0 on acceptance, -EINVAL on an empty queue or bad endpoint.
 *
 * Cleanup vs. original: removed the dead NULL check after
 * list_first_entry() - the macro never yields NULL and the list_empty()
 * guard above already covers the empty case.
 */
static int htc_send_packets_multiple(struct htc_target *target,
				     struct list_head *pkt_queue)
{
	struct htc_endpoint *ep;
	struct htc_packet *packet, *tmp_pkt;

	if (list_empty(pkt_queue))
		return -EINVAL;

	/* get first packet to find out which ep the packets will go into */
	packet = list_first_entry(pkt_queue, struct htc_packet, list);

	if (packet->endpoint >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}
	ep = &target->endpoint[packet->endpoint];

	htc_try_send(target, ep, pkt_queue);

	/* do completion on any packets that couldn't get in */
	if (!list_empty(pkt_queue)) {
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ENOMEM;
		}

		do_send_completion(ep, pkt_queue);
	}

	return 0;
}
/* htc pipe rx path */
/*
 * Pop an htc_packet container from the RX free pool, or NULL if the
 * pool is empty.  The pool is a singly linked list threaded through
 * list.next under rx_lock.
 */
static struct htc_packet *alloc_htc_packet_container(struct htc_target *target)
{
	struct htc_packet *packet = NULL;

	spin_lock_bh(&target->rx_lock);
	if (target->pipe.htc_packet_pool != NULL) {
		packet = target->pipe.htc_packet_pool;
		target->pipe.htc_packet_pool =
			(struct htc_packet *) packet->list.next;
	}
	spin_unlock_bh(&target->rx_lock);

	if (packet != NULL)
		packet->list.next = NULL;

	return packet;
}
/* Push an htc_packet container back onto the head of the RX free pool. */
static void free_htc_packet_container(struct htc_target *target,
				      struct htc_packet *packet)
{
	spin_lock_bh(&target->rx_lock);

	if (target->pipe.htc_packet_pool == NULL)
		packet->list.next = NULL;
	else
		packet->list.next =
			(struct list_head *) target->pipe.htc_packet_pool;

	target->pipe.htc_packet_pool = packet;

	spin_unlock_bh(&target->rx_lock);
}
/*
 * Walk the record trailer appended to an RX frame.  Each record is a
 * byte-aligned htc_record_hdr followed by record->len payload bytes.
 * Only credit-report records are acted on; unknown records are skipped.
 *
 * Returns 0 on success, -EINVAL on a truncated or malformed trailer.
 */
static int htc_process_trailer(struct htc_target *target, u8 *buffer,
			       int len, enum htc_endpoint_id from_ep)
{
	struct htc_credit_report *report;
	struct htc_record_hdr *record;
	u8 *record_buf, *orig_buf;
	int orig_len, status;

	/* NOTE(review): orig_buf/orig_len are saved but never read below */
	orig_buf = buffer;
	orig_len = len;
	status = 0;

	while (len > 0) {
		/* need at least a full record header */
		if (len < sizeof(struct htc_record_hdr)) {
			status = -EINVAL;
			break;
		}

		/* these are byte aligned structs */
		record = (struct htc_record_hdr *) buffer;
		len -= sizeof(struct htc_record_hdr);
		buffer += sizeof(struct htc_record_hdr);

		if (record->len > len) {
			/* no room left in buffer for record */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid length: %d (id:%d) buffer has: %d bytes left\n",
				   record->len, record->rec_id, len);
			status = -EINVAL;
			break;
		}

		/* start of record follows the header */
		record_buf = buffer;

		switch (record->rec_id) {
		case HTC_RECORD_CREDITS:
			if (record->len < sizeof(struct htc_credit_report)) {
				WARN_ON_ONCE(1);
				return -EINVAL;
			}

			/* a record may carry several credit-report entries */
			report = (struct htc_credit_report *) record_buf;
			htc_process_credit_report(target, report,
						  record->len / sizeof(*report),
						  from_ep);
			break;
		default:
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "unhandled record: id:%d length:%d\n",
				   record->rec_id, record->len);
			break;
		}

		if (status != 0)
			break;

		/* advance buffer past this record for next time around */
		buffer += record->len;
		len -= record->len;
	}

	return status;
}
/*
 * Deliver each received packet on @queue_to_indicate to the endpoint's
 * legacy EpRecv callback, one packet per call.
 */
static void do_recv_completion(struct htc_endpoint *ep,
			       struct list_head *queue_to_indicate)
{
	struct htc_packet *pkt;

	while (!list_empty(queue_to_indicate)) {
		pkt = list_first_entry(queue_to_indicate,
				       struct htc_packet, list);
		list_del(&pkt->list);
		ep->ep_cb.rx(ep->target, pkt);
	}
}
static void recv_packet_completion(struct htc_target *target,
struct htc_endpoint *ep,
struct htc_packet *packet)
{
struct list_head container;
INIT_LIST_HEAD(&container);
list_add_tail(&packet->list, &container);
/* do completion */
do_recv_completion(ep, &container);
}
/*
 * HIF->HTC receive completion for the pipe (USB) HIF.
 *
 * Validates the HTC frame header, processes any trailer records,
 * consumes endpoint-0 control messages into the control response
 * buffer, and hands data packets up through the endpoint rx callback.
 * Consumes @skb on all paths.  Returns 0 or a negative errno.
 */
static int ath6kl_htc_pipe_rx_complete(struct ath6kl *ar, struct sk_buff *skb,
				       u8 pipeid)
{
	struct htc_target *target = ar->htc_target;
	u8 *netdata, *trailer, hdr_info;
	struct htc_frame_hdr *htc_hdr;
	u32 netlen, trailerlen = 0;
	struct htc_packet *packet;
	struct htc_endpoint *ep;
	u16 payload_len;
	int status = 0;

	netdata = skb->data;
	netlen = skb->len;

	htc_hdr = (struct htc_frame_hdr *) netdata;

	/*
	 * Validate the endpoint id BEFORE using it to index the endpoint
	 * array; computing the ep pointer first is an out-of-bounds
	 * access when the header is corrupt.
	 */
	if (htc_hdr->eid >= ENDPOINT_MAX) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: invalid EndpointID=%d\n",
			   htc_hdr->eid);
		status = -EINVAL;
		goto free_skb;
	}
	ep = &target->endpoint[htc_hdr->eid];

	payload_len = le16_to_cpu(get_unaligned(&htc_hdr->payld_len));

	if (netlen < (payload_len + HTC_HDR_LENGTH)) {
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "HTC Rx: insufficient length, got:%d expected =%u\n",
			   netlen, payload_len + HTC_HDR_LENGTH);
		status = -EINVAL;
		goto free_skb;
	}

	/* get flags to check for trailer */
	hdr_info = htc_hdr->flags;
	if (hdr_info & HTC_FLG_RX_TRAILER) {
		/* extract the trailer length */
		hdr_info = htc_hdr->ctrl[0];
		if ((hdr_info < sizeof(struct htc_record_hdr)) ||
		    (hdr_info > payload_len)) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "invalid header: payloadlen should be %d, CB[0]: %d\n",
				   payload_len, hdr_info);
			status = -EINVAL;
			goto free_skb;
		}

		trailerlen = hdr_info;
		/* process trailer after hdr/apps payload */
		trailer = (u8 *) htc_hdr + HTC_HDR_LENGTH +
			payload_len - hdr_info;
		status = htc_process_trailer(target, trailer, hdr_info,
					     htc_hdr->eid);
		if (status != 0)
			goto free_skb;
	}

	if (((int) payload_len - (int) trailerlen) <= 0) {
		/* zero length packet with trailer, just drop these */
		goto free_skb;
	}

	if (htc_hdr->eid == ENDPOINT_0) {
		/* handle HTC control message */
		if (target->htc_flags & HTC_OP_STATE_SETUP_COMPLETE) {
			/*
			 * fatal: target should not send unsolicited
			 * messages on the endpoint 0
			 */
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "HTC ignores Rx Ctrl after setup complete\n");
			status = -EINVAL;
			goto free_skb;
		}

		/* remove HTC header */
		skb_pull(skb, HTC_HDR_LENGTH);

		netdata = skb->data;
		netlen = skb->len;

		/* stash the message for htc_wait_recv_ctrl_message() */
		spin_lock_bh(&target->rx_lock);

		target->pipe.ctrl_response_valid = true;
		target->pipe.ctrl_response_len = min_t(int, netlen,
						       HTC_MAX_CTRL_MSG_LEN);
		memcpy(target->pipe.ctrl_response_buf, netdata,
		       target->pipe.ctrl_response_len);

		spin_unlock_bh(&target->rx_lock);

		dev_kfree_skb(skb);
		skb = NULL;
		goto free_skb;
	}

	/*
	 * TODO: the message based HIF architecture allocates net bufs
	 * for recv packets since it bridges that HIF to upper layers,
	 * which expects HTC packets, we form the packets here
	 */
	packet = alloc_htc_packet_container(target);
	if (packet == NULL) {
		status = -ENOMEM;
		goto free_skb;
	}

	packet->status = 0;
	packet->endpoint = htc_hdr->eid;
	packet->pkt_cntxt = skb;

	/* TODO: for backwards compatibility */
	packet->buf = skb_push(skb, 0) + HTC_HDR_LENGTH;
	packet->act_len = netlen - HTC_HDR_LENGTH - trailerlen;

	/*
	 * TODO: this is a hack because the driver layer will set the
	 * actual len of the skb again which will just double the len
	 */
	skb_trim(skb, 0);

	recv_packet_completion(target, ep, packet);

	/* recover the packet container */
	free_htc_packet_container(target, packet);
	skb = NULL;

free_skb:
	if (skb != NULL)
		dev_kfree_skb(skb);

	return status;
}
/*
 * Cancel every packet queued on @ep's rx_bufq, completing each one back
 * to its owner with -ECANCELED.  The rx_lock is dropped around the
 * completion callback since it may re-enter HTC.
 */
static void htc_flush_rx_queue(struct htc_target *target,
			       struct htc_endpoint *ep)
{
	struct htc_packet *pkt;
	struct list_head container;

	spin_lock_bh(&target->rx_lock);

	while (!list_empty(&ep->rx_bufq)) {
		pkt = list_first_entry(&ep->rx_bufq,
				       struct htc_packet, list);
		list_del(&pkt->list);

		spin_unlock_bh(&target->rx_lock);

		pkt->status = -ECANCELED;
		pkt->act_len = 0;

		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "Flushing RX packet:0x%p, length:%d, ep:%d\n",
			   pkt, pkt->buf_len,
			   pkt->endpoint);

		INIT_LIST_HEAD(&container);
		list_add_tail(&pkt->list, &container);

		/* give the packet back */
		do_recv_completion(ep, &container);

		spin_lock_bh(&target->rx_lock);
	}

	spin_unlock_bh(&target->rx_lock);
}
/* polling routine to wait for a control packet to be received */
static int htc_wait_recv_ctrl_message(struct htc_target *target)
{
int count = HTC_TARGET_RESPONSE_POLL_COUNT;
while (count > 0) {
spin_lock_bh(&target->rx_lock);
if (target->pipe.ctrl_response_valid) {
target->pipe.ctrl_response_valid = false;
spin_unlock_bh(&target->rx_lock);
break;
}
spin_unlock_bh(&target->rx_lock);
count--;
msleep_interruptible(HTC_TARGET_RESPONSE_POLL_WAIT);
}
if (count <= 0) {
ath6kl_dbg(ATH6KL_DBG_HTC, "%s: Timeout!\n", __func__);
return -ECOMM;
}
return 0;
}
/*
 * Rx callback for the pseudo control service on ENDPOINT_0.  Control
 * messages are consumed inline by ath6kl_htc_pipe_rx_complete(), so
 * this callback should never actually run; it only logs.
 */
static void htc_rxctrl_complete(struct htc_target *context,
				struct htc_packet *packet)
{
	/* TODO, can't really receive HTC control messages yet.... */
	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: invalid call function\n", __func__);
}
/* htc pipe initialization */
static void reset_endpoint_states(struct htc_target *target)
{
struct htc_endpoint *ep;
int i;
for (i = ENDPOINT_0; i < ENDPOINT_MAX; i++) {
ep = &target->endpoint[i];
ep->svc_id = 0;
ep->len_max = 0;
ep->max_txq_depth = 0;
ep->eid = i;
INIT_LIST_HEAD(&ep->txq);
INIT_LIST_HEAD(&ep->pipe.tx_lookup_queue);
INIT_LIST_HEAD(&ep->rx_bufq);
ep->target = target;
ep->pipe.tx_credit_flow_enabled = (bool) 1; /* FIXME */
}
}
/* start HTC, this is called after all services are connected */
static int htc_config_target_hif_pipe(struct htc_target *target)
{
return 0;
}
/* htc service functions */
static u8 htc_get_credit_alloc(struct htc_target *target, u16 service_id)
{
u8 allocation = 0;
int i;
for (i = 0; i < ENDPOINT_MAX; i++) {
if (target->pipe.txcredit_alloc[i].service_id == service_id)
allocation =
target->pipe.txcredit_alloc[i].credit_alloc;
}
if (allocation == 0) {
ath6kl_dbg(ATH6KL_DBG_HTC,
"HTC Service TX : 0x%2.2X : allocation is zero!\n",
service_id);
}
return allocation;
}
/*
 * Connect an HTC service to an endpoint.
 *
 * For the pseudo control service (HTC_CTRL_RSVD_SVC) no message
 * exchange is needed: ENDPOINT_0 is assigned directly.  For real
 * services, a connect-service message is sent to the target and the
 * response is polled for synchronously.  On success the assigned
 * endpoint is initialized (credits, max message size, callbacks) and
 * mapped onto HIF upload/download pipes.
 *
 * Returns 0 on success or a negative errno.
 */
static int ath6kl_htc_pipe_conn_service(struct htc_target *target,
		    struct htc_service_connect_req *conn_req,
		    struct htc_service_connect_resp *conn_resp)
{
	struct ath6kl *ar = target->dev->ar;
	struct htc_packet *packet = NULL;
	struct htc_conn_service_resp *resp_msg;
	struct htc_conn_service_msg *conn_msg;
	enum htc_endpoint_id assigned_epid = ENDPOINT_MAX;
	bool disable_credit_flowctrl = false;
	unsigned int max_msg_size = 0;
	struct htc_endpoint *ep;
	int length, status = 0;
	struct sk_buff *skb;
	u8 tx_alloc;
	u16 flags;

	if (conn_req->svc_id == 0) {
		WARN_ON_ONCE(1);
		status = -EINVAL;
		goto free_packet;
	}

	if (conn_req->svc_id == HTC_CTRL_RSVD_SVC) {
		/* special case for pseudo control service */
		assigned_epid = ENDPOINT_0;
		max_msg_size = HTC_MAX_CTRL_MSG_LEN;
		tx_alloc = 0;
	} else {
		tx_alloc = htc_get_credit_alloc(target, conn_req->svc_id);
		if (tx_alloc == 0) {
			status = -ENOMEM;
			goto free_packet;
		}

		/* allocate a packet to send to the target */
		packet = htc_alloc_txctrl_packet(target);

		if (packet == NULL) {
			WARN_ON_ONCE(1);
			status = -ENOMEM;
			goto free_packet;
		}

		skb = packet->skb;
		length = sizeof(struct htc_conn_service_msg);

		/* assemble connect service message */
		conn_msg = (struct htc_conn_service_msg *) skb_put(skb,
								   length);
		if (conn_msg == NULL) {
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		memset(conn_msg, 0,
		       sizeof(struct htc_conn_service_msg));

		conn_msg->msg_id = cpu_to_le16(HTC_MSG_CONN_SVC_ID);
		conn_msg->svc_id = cpu_to_le16(conn_req->svc_id);
		conn_msg->conn_flags = cpu_to_le16(conn_req->conn_flags &
					~HTC_CONN_FLGS_SET_RECV_ALLOC_MASK);

		/* tell target desired recv alloc for this ep */
		flags = tx_alloc << HTC_CONN_FLGS_SET_RECV_ALLOC_SHIFT;
		conn_msg->conn_flags |= cpu_to_le16(flags);

		if (conn_req->conn_flags &
		    HTC_CONN_FLGS_DISABLE_CRED_FLOW_CTRL) {
			disable_credit_flowctrl = true;
		}

		set_htc_pkt_info(packet, NULL, (u8 *) conn_msg,
				 length,
				 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

		status = ath6kl_htc_pipe_tx(target, packet);

		/* we don't own it anymore */
		packet = NULL;
		if (status != 0)
			goto free_packet;

		/* wait for response */
		status = htc_wait_recv_ctrl_message(target);
		if (status != 0)
			goto free_packet;

		/* we controlled the buffer creation so it has to be
		 * properly aligned
		 */
		resp_msg = (struct htc_conn_service_resp *)
		    target->pipe.ctrl_response_buf;

		if (resp_msg->msg_id != cpu_to_le16(HTC_MSG_CONN_SVC_RESP_ID) ||
		    (target->pipe.ctrl_response_len < sizeof(*resp_msg))) {
			/* this message is not valid */
			WARN_ON_ONCE(1);
			status = -EINVAL;
			goto free_packet;
		}

		ath6kl_dbg(ATH6KL_DBG_TRC,
			   "%s: service 0x%X conn resp: status: %d ep: %d\n",
			   __func__, resp_msg->svc_id, resp_msg->status,
			   resp_msg->eid);

		conn_resp->resp_code = resp_msg->status;
		/* check response status */
		if (resp_msg->status != HTC_SERVICE_SUCCESS) {
			ath6kl_dbg(ATH6KL_DBG_HTC,
				   "Target failed service 0x%X connect request (status:%d)\n",
				   resp_msg->svc_id, resp_msg->status);
			status = -EINVAL;
			goto free_packet;
		}

		assigned_epid = (enum htc_endpoint_id) resp_msg->eid;
		max_msg_size = le16_to_cpu(resp_msg->max_msg_sz);
	}

	/* the rest are parameter checks so set the error status */
	status = -EINVAL;
	if (assigned_epid >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	if (max_msg_size == 0) {
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	ep = &target->endpoint[assigned_epid];
	ep->eid = assigned_epid;
	if (ep->svc_id != 0) {
		/* endpoint already in use! */
		WARN_ON_ONCE(1);
		goto free_packet;
	}

	/* return assigned endpoint to caller */
	conn_resp->endpoint = assigned_epid;
	conn_resp->len_max = max_msg_size;

	/* setup the endpoint */
	ep->svc_id = conn_req->svc_id; /* this marks ep in use */
	ep->max_txq_depth = conn_req->max_txq_depth;
	ep->len_max = max_msg_size;
	ep->cred_dist.credits = tx_alloc;
	ep->cred_dist.cred_sz = target->tgt_cred_sz;
	/* round the per-message credit cost up */
	ep->cred_dist.cred_per_msg = max_msg_size / target->tgt_cred_sz;
	if (max_msg_size % target->tgt_cred_sz)
		ep->cred_dist.cred_per_msg++;

	/* copy all the callbacks */
	ep->ep_cb = conn_req->ep_cb;

	/* map the service id onto HIF upload/download pipes */
	status = ath6kl_hif_pipe_map_service(ar, ep->svc_id,
					     &ep->pipe.pipeid_ul,
					     &ep->pipe.pipeid_dl);
	if (status != 0)
		goto free_packet;

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "SVC Ready: 0x%4.4X: ULpipe:%d DLpipe:%d id:%d\n",
		   ep->svc_id, ep->pipe.pipeid_ul,
		   ep->pipe.pipeid_dl, ep->eid);

	if (disable_credit_flowctrl && ep->pipe.tx_credit_flow_enabled) {
		ep->pipe.tx_credit_flow_enabled = false;
		ath6kl_dbg(ATH6KL_DBG_HTC,
			   "SVC: 0x%4.4X ep:%d TX flow control off\n",
			   ep->svc_id, assigned_epid);
	}

free_packet:
	if (packet != NULL)
		htc_free_txctrl_packet(target, packet);
	return status;
}
/* htc export functions */
static void *ath6kl_htc_pipe_create(struct ath6kl *ar)
{
int status = 0;
struct htc_endpoint *ep = NULL;
struct htc_target *target = NULL;
struct htc_packet *packet;
int i;
target = kzalloc(sizeof(struct htc_target), GFP_KERNEL);
if (target == NULL) {
ath6kl_err("htc create unable to allocate memory\n");
status = -ENOMEM;
goto fail_htc_create;
}
spin_lock_init(&target->htc_lock);
spin_lock_init(&target->rx_lock);
spin_lock_init(&target->tx_lock);
reset_endpoint_states(target);
for (i = 0; i < HTC_PACKET_CONTAINER_ALLOCATION; i++) {
packet = kzalloc(sizeof(struct htc_packet), GFP_KERNEL);
if (packet != NULL)
free_htc_packet_container(target, packet);
}
target->dev = kzalloc(sizeof(*target->dev), GFP_KERNEL);
if (!target->dev) {
ath6kl_err("unable to allocate memory\n");
status = -ENOMEM;
goto fail_htc_create;
}
target->dev->ar = ar;
target->dev->htc_cnxt = target;
/* Get HIF default pipe for HTC message exchange */
ep = &target->endpoint[ENDPOINT_0];
ath6kl_hif_pipe_get_default(ar, &ep->pipe.pipeid_ul,
&ep->pipe.pipeid_dl);
return target;
fail_htc_create:
if (status != 0) {
if (target != NULL)
ath6kl_htc_pipe_cleanup(target);
target = NULL;
}
return target;
}
/* cleanup the HTC instance */
static void ath6kl_htc_pipe_cleanup(struct htc_target *target)
{
struct htc_packet *packet;
while (true) {
packet = alloc_htc_packet_container(target);
if (packet == NULL)
break;
kfree(packet);
}
kfree(target->dev);
/* kfree our instance */
kfree(target);
}
/*
 * Start HTC: send the setup-complete message to the target on the
 * control endpoint.  The SETUP_COMPLETE flag is raised before the
 * message is transmitted so rx processing treats subsequent endpoint-0
 * traffic as unsolicited.
 */
static int ath6kl_htc_pipe_start(struct htc_target *target)
{
	struct htc_setup_comp_ext_msg *setup;
	struct htc_packet *packet;
	struct sk_buff *skb;

	htc_config_target_hif_pipe(target);

	/* allocate a buffer to send */
	packet = htc_alloc_txctrl_packet(target);
	if (packet == NULL) {
		WARN_ON_ONCE(1);
		return -ENOMEM;
	}

	skb = packet->skb;

	/* assemble setup complete message */
	setup = (struct htc_setup_comp_ext_msg *) skb_put(skb,
							  sizeof(*setup));
	memset(setup, 0, sizeof(*setup));
	setup->msg_id = cpu_to_le16(HTC_MSG_SETUP_COMPLETE_EX_ID);

	ath6kl_dbg(ATH6KL_DBG_HTC, "HTC using TX credit flow control\n");

	set_htc_pkt_info(packet, NULL, (u8 *) setup, sizeof(*setup),
			 ENDPOINT_0, HTC_SERVICE_TX_PACKET_TAG);

	target->htc_flags |= HTC_OP_STATE_SETUP_COMPLETE;

	return ath6kl_htc_pipe_tx(target, packet);
}
static void ath6kl_htc_pipe_stop(struct htc_target *target)
{
int i;
struct htc_endpoint *ep;
/* cleanup endpoints */
for (i = 0; i < ENDPOINT_MAX; i++) {
ep = &target->endpoint[i];
htc_flush_rx_queue(target, ep);
htc_flush_tx_endpoint(target, ep, HTC_TX_PACKET_TAG_ALL);
}
reset_endpoint_states(target);
target->htc_flags &= ~HTC_OP_STATE_SETUP_COMPLETE;
}
/* return the number of rx buffers currently queued on @endpoint */
static int ath6kl_htc_pipe_get_rxbuf_num(struct htc_target *target,
					 enum htc_endpoint_id endpoint)
{
	int depth;

	spin_lock_bh(&target->rx_lock);
	depth = get_queue_depth(&target->endpoint[endpoint].rx_bufq);
	spin_unlock_bh(&target->rx_lock);

	return depth;
}
static int ath6kl_htc_pipe_tx(struct htc_target *target,
struct htc_packet *packet)
{
struct list_head queue;
ath6kl_dbg(ATH6KL_DBG_HTC,
"%s: endPointId: %d, buffer: 0x%p, length: %d\n",
__func__, packet->endpoint, packet->buf,
packet->act_len);
INIT_LIST_HEAD(&queue);
list_add_tail(&packet->list, &queue);
return htc_send_packets_multiple(target, &queue);
}
/*
 * Wait for the target's HTC "ready" message, record the advertised
 * credit count and size, distribute the buffer/credit assignments, and
 * connect the pseudo control service on ENDPOINT_0.
 *
 * Returns 0 on success or a negative errno (-ECOMM on a missing or
 * malformed ready message).
 */
static int ath6kl_htc_pipe_wait_target(struct htc_target *target)
{
	struct htc_ready_ext_msg *ready_msg;
	struct htc_service_connect_req connect;
	struct htc_service_connect_resp resp;
	int status = 0;

	status = htc_wait_recv_ctrl_message(target);
	if (status != 0)
		return status;

	if (target->pipe.ctrl_response_len < sizeof(*ready_msg)) {
		ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg len:%d!\n",
			   target->pipe.ctrl_response_len);
		return -ECOMM;
	}

	/* response buffer was filled by ath6kl_htc_pipe_rx_complete() */
	ready_msg = (struct htc_ready_ext_msg *) target->pipe.ctrl_response_buf;

	if (ready_msg->ver2_0_info.msg_id != cpu_to_le16(HTC_MSG_READY_ID)) {
		ath6kl_dbg(ATH6KL_DBG_HTC, "invalid htc ready msg : 0x%X !\n",
			   ready_msg->ver2_0_info.msg_id);
		return -ECOMM;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC,
		   "Target Ready! : transmit resources : %d size:%d\n",
		   ready_msg->ver2_0_info.cred_cnt,
		   ready_msg->ver2_0_info.cred_sz);

	target->tgt_creds = le16_to_cpu(ready_msg->ver2_0_info.cred_cnt);
	target->tgt_cred_sz = le16_to_cpu(ready_msg->ver2_0_info.cred_sz);

	if ((target->tgt_creds == 0) || (target->tgt_cred_sz == 0))
		return -ECOMM;

	htc_setup_target_buffer_assignments(target);

	/* setup our pseudo HTC control endpoint connection */
	memset(&connect, 0, sizeof(connect));
	memset(&resp, 0, sizeof(resp));
	connect.ep_cb.tx_complete = htc_txctrl_complete;
	connect.ep_cb.rx = htc_rxctrl_complete;
	connect.max_txq_depth = NUM_CONTROL_TX_BUFFERS;
	connect.svc_id = HTC_CTRL_RSVD_SVC;

	/* connect fake service */
	status = ath6kl_htc_pipe_conn_service(target, &connect, &resp);

	return status;
}
/* flush pending tx packets tagged @tag from a connected endpoint */
static void ath6kl_htc_pipe_flush_txep(struct htc_target *target,
				       enum htc_endpoint_id endpoint, u16 tag)
{
	struct htc_endpoint *ep = &target->endpoint[endpoint];

	if (ep->svc_id != 0) {
		htc_flush_tx_endpoint(target, ep, tag);
		return;
	}

	/* endpoint has no connected service */
	WARN_ON_ONCE(1);
}
/*
 * Queue caller-supplied receive buffers onto the endpoint's rx_bufq.
 * All packets in @pkt_queue are assumed to target the same endpoint as
 * the first entry.  Returns 0, or -EINVAL for an empty queue or an
 * out-of-range endpoint.
 */
static int ath6kl_htc_pipe_add_rxbuf_multiple(struct htc_target *target,
					      struct list_head *pkt_queue)
{
	struct htc_packet *packet, *tmp_pkt, *first;
	struct htc_endpoint *ep;
	int status = 0;

	if (list_empty(pkt_queue))
		return -EINVAL;

	first = list_first_entry(pkt_queue, struct htc_packet, list);
	/* NOTE(review): list_first_entry() on a non-empty list never
	 * yields NULL, so this branch is effectively dead */
	if (first == NULL) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	if (first->endpoint >= ENDPOINT_MAX) {
		WARN_ON_ONCE(1);
		return -EINVAL;
	}

	ath6kl_dbg(ATH6KL_DBG_HTC, "%s: epid: %d, cnt:%d, len: %d\n",
		   __func__, first->endpoint, get_queue_depth(pkt_queue),
		   first->buf_len);

	ep = &target->endpoint[first->endpoint];

	spin_lock_bh(&target->rx_lock);

	/* store receive packets */
	list_splice_tail_init(pkt_queue, &ep->rx_bufq);

	spin_unlock_bh(&target->rx_lock);

	/* NOTE(review): status is still 0 at this point, so this
	 * cancellation path can never execute as written */
	if (status != 0) {
		/* walk through queue and mark each one canceled */
		list_for_each_entry_safe(packet, tmp_pkt, pkt_queue, list) {
			packet->status = -ECANCELED;
		}

		do_recv_completion(ep, pkt_queue);
	}

	return status;
}
/* endpoint activity notification -- not implemented for the pipe HIF */
static void ath6kl_htc_pipe_activity_changed(struct htc_target *target,
					     enum htc_endpoint_id ep,
					     bool active)
{
	/* TODO */
}
/* flush queued receive buffers -- not implemented for the pipe HIF */
static void ath6kl_htc_pipe_flush_rx_buf(struct htc_target *target)
{
	/* TODO */
}
/*
 * Credit distribution setup is a no-op for the pipe HIF; per-service
 * credit allocations appear to come from the txcredit_alloc table
 * instead (see htc_get_credit_alloc()) -- TODO confirm.
 */
static int ath6kl_htc_pipe_credit_setup(struct htc_target *target,
					struct ath6kl_htc_credit_info *info)
{
	return 0;
}
/* HTC ops vtable for the pipe-based (USB) HIF implementation */
static const struct ath6kl_htc_ops ath6kl_htc_pipe_ops = {
	.create = ath6kl_htc_pipe_create,
	.wait_target = ath6kl_htc_pipe_wait_target,
	.start = ath6kl_htc_pipe_start,
	.conn_service = ath6kl_htc_pipe_conn_service,
	.tx = ath6kl_htc_pipe_tx,
	.stop = ath6kl_htc_pipe_stop,
	.cleanup = ath6kl_htc_pipe_cleanup,
	.flush_txep = ath6kl_htc_pipe_flush_txep,
	.flush_rx_buf = ath6kl_htc_pipe_flush_rx_buf,
	.activity_changed = ath6kl_htc_pipe_activity_changed,
	.get_rxbuf_num = ath6kl_htc_pipe_get_rxbuf_num,
	.add_rxbuf_multiple = ath6kl_htc_pipe_add_rxbuf_multiple,
	.credit_setup = ath6kl_htc_pipe_credit_setup,
	.tx_complete = ath6kl_htc_pipe_tx_complete,
	.rx_complete = ath6kl_htc_pipe_rx_complete,
};
/* install the pipe-based HTC ops on the core driver instance */
void ath6kl_htc_pipe_attach(struct ath6kl *ar)
{
	ar->htc_ops = &ath6kl_htc_pipe_ops;
}
......@@ -23,12 +23,14 @@
#include <linux/export.h>
#include <linux/of.h>
#include <linux/mmc/sdio_func.h>
#include <linux/vmalloc.h>
#include "core.h"
#include "cfg80211.h"
#include "target.h"
#include "debug.h"
#include "hif-ops.h"
#include "htc-ops.h"
static const struct ath6kl_hw hw_list[] = {
{
......@@ -258,6 +260,7 @@ static int ath6kl_init_service_ep(struct ath6kl *ar)
memset(&connect, 0, sizeof(connect));
/* these fields are the same for all service endpoints */
connect.ep_cb.tx_comp_multi = ath6kl_tx_complete;
connect.ep_cb.rx = ath6kl_rx;
connect.ep_cb.rx_refill = ath6kl_rx_refill;
connect.ep_cb.tx_full = ath6kl_tx_queue_full;
......@@ -487,22 +490,31 @@ int ath6kl_configure_target(struct ath6kl *ar)
fw_mode |= fw_iftype << (i * HI_OPTION_FW_MODE_BITS);
/*
* By default, submodes :
* Submodes when fw does not support dynamic interface
* switching:
* vif[0] - AP/STA/IBSS
* vif[1] - "P2P dev"/"P2P GO"/"P2P Client"
* vif[2] - "P2P dev"/"P2P GO"/"P2P Client"
* Otherwise, All the interface are initialized to p2p dev.
*/
for (i = 0; i < ar->max_norm_iface; i++)
fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
(i * HI_OPTION_FW_SUBMODE_BITS);
if (test_bit(ATH6KL_FW_CAPABILITY_STA_P2PDEV_DUPLEX,
ar->fw_capabilities)) {
for (i = 0; i < ar->vif_max; i++)
fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
(i * HI_OPTION_FW_SUBMODE_BITS);
} else {
for (i = 0; i < ar->max_norm_iface; i++)
fw_submode |= HI_OPTION_FW_SUBMODE_NONE <<
(i * HI_OPTION_FW_SUBMODE_BITS);
for (i = ar->max_norm_iface; i < ar->vif_max; i++)
fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
(i * HI_OPTION_FW_SUBMODE_BITS);
for (i = ar->max_norm_iface; i < ar->vif_max; i++)
fw_submode |= HI_OPTION_FW_SUBMODE_P2PDEV <<
(i * HI_OPTION_FW_SUBMODE_BITS);
if (ar->p2p && ar->vif_max == 1)
fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
if (ar->p2p && ar->vif_max == 1)
fw_submode = HI_OPTION_FW_SUBMODE_P2PDEV;
}
if (ath6kl_bmi_write_hi32(ar, hi_app_host_interest,
HTC_PROTOCOL_VERSION) != 0) {
......@@ -541,18 +553,20 @@ int ath6kl_configure_target(struct ath6kl *ar)
* but possible in theory.
*/
param = ar->hw.board_ext_data_addr;
ram_reserved_size = ar->hw.reserved_ram_size;
if (ar->target_type == TARGET_TYPE_AR6003) {
param = ar->hw.board_ext_data_addr;
ram_reserved_size = ar->hw.reserved_ram_size;
if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
return -EIO;
}
if (ath6kl_bmi_write_hi32(ar, hi_board_ext_data, param) != 0) {
ath6kl_err("bmi_write_memory for hi_board_ext_data failed\n");
return -EIO;
}
if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
ram_reserved_size) != 0) {
ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
return -EIO;
if (ath6kl_bmi_write_hi32(ar, hi_end_ram_reserve_sz,
ram_reserved_size) != 0) {
ath6kl_err("bmi_write_memory for hi_end_ram_reserve_sz failed\n");
return -EIO;
}
}
/* set the block size for the target */
......@@ -926,13 +940,14 @@ static int ath6kl_fetch_fw_apin(struct ath6kl *ar, const char *name)
if (ar->fw != NULL)
break;
ar->fw = kmemdup(data, ie_len, GFP_KERNEL);
ar->fw = vmalloc(ie_len);
if (ar->fw == NULL) {
ret = -ENOMEM;
goto out;
}
memcpy(ar->fw, data, ie_len);
ar->fw_len = ie_len;
break;
case ATH6KL_FW_IE_PATCH_IMAGE:
......@@ -1509,7 +1524,7 @@ int ath6kl_init_hw_start(struct ath6kl *ar)
}
/* setup credit distribution */
ath6kl_credit_setup(ar->htc_target, &ar->credit_state_info);
ath6kl_htc_credit_setup(ar->htc_target, &ar->credit_state_info);
/* start HTC */
ret = ath6kl_htc_start(ar->htc_target);
......
......@@ -758,6 +758,10 @@ static void ath6kl_update_target_stats(struct ath6kl_vif *vif, u8 *ptr, u32 len)
stats->wow_evt_discarded +=
le16_to_cpu(tgt_stats->wow_stats.wow_evt_discarded);
stats->arp_received = le32_to_cpu(tgt_stats->arp_stats.arp_received);
stats->arp_replied = le32_to_cpu(tgt_stats->arp_stats.arp_replied);
stats->arp_matched = le32_to_cpu(tgt_stats->arp_stats.arp_matched);
if (test_bit(STATS_UPDATE_PEND, &vif->flags)) {
clear_bit(STATS_UPDATE_PEND, &vif->flags);
wake_up(&ar->event_wq);
......
......@@ -1362,7 +1362,7 @@ static int ath6kl_sdio_probe(struct sdio_func *func,
goto err_core_alloc;
}
ret = ath6kl_core_init(ar);
ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_MBOX);
if (ret) {
ath6kl_err("Failed to init ath6kl core\n");
goto err_core_alloc;
......
......@@ -19,6 +19,7 @@
#include "core.h"
#include "debug.h"
#include "htc-ops.h"
/*
* tid - tid_mux0..tid_mux3
......@@ -324,6 +325,7 @@ int ath6kl_control_tx(void *devt, struct sk_buff *skb,
cookie->map_no = 0;
set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
eid, ATH6KL_CONTROL_PKT_TAG);
cookie->htc_pkt.skb = skb;
/*
* This interface is asynchronous, if there is an error, cleanup
......@@ -492,6 +494,7 @@ int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
cookie->map_no = map_no;
set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
eid, htc_tag);
cookie->htc_pkt.skb = skb;
ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
skb->data, skb->len);
......@@ -572,7 +575,7 @@ void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
notify_htc:
/* notify HTC, this may cause credit distribution changes */
ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
ath6kl_htc_activity_changed(ar->htc_target, eid, active);
}
enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
......@@ -668,9 +671,10 @@ static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
}
}
void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
void ath6kl_tx_complete(struct htc_target *target,
struct list_head *packet_queue)
{
struct ath6kl *ar = context;
struct ath6kl *ar = target->dev->ar;
struct sk_buff_head skb_queue;
struct htc_packet *packet;
struct sk_buff *skb;
......@@ -889,6 +893,7 @@ void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
skb->data = PTR_ALIGN(skb->data - 4, 4);
set_htc_rxpkt_info(packet, skb, skb->data,
ATH6KL_BUFFER_SIZE, endpoint);
packet->skb = skb;
list_add_tail(&packet->list, &queue);
}
......@@ -911,6 +916,8 @@ void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
skb->data = PTR_ALIGN(skb->data - 4, 4);
set_htc_rxpkt_info(packet, skb, skb->data,
ATH6KL_AMSDU_BUFFER_SIZE, 0);
packet->skb = skb;
spin_lock_bh(&ar->lock);
list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
spin_unlock_bh(&ar->lock);
......@@ -1283,6 +1290,7 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
struct wmi_data_hdr *dhdr;
int min_hdr_len;
u8 meta_type, dot11_hdr = 0;
u8 pad_before_data_start;
int status = packet->status;
enum htc_endpoint_id ept = packet->endpoint;
bool is_amsdu, prev_ps, ps_state = false;
......@@ -1494,6 +1502,10 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
seq_no = wmi_data_hdr_get_seqno(dhdr);
meta_type = wmi_data_hdr_get_meta(dhdr);
dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
pad_before_data_start =
(le16_to_cpu(dhdr->info3) >> WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT)
& WMI_DATA_HDR_PAD_BEFORE_DATA_MASK;
skb_pull(skb, sizeof(struct wmi_data_hdr));
switch (meta_type) {
......@@ -1512,6 +1524,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
break;
}
skb_pull(skb, pad_before_data_start);
if (dot11_hdr)
status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
else if (!is_amsdu)
......@@ -1581,7 +1595,8 @@ void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
/* aggregation code will handle the skb */
return;
}
}
} else if (!is_broadcast_ether_addr(datap->h_dest))
vif->net_stats.multicast++;
ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
}
......
......@@ -21,15 +21,77 @@
#include "debug.h"
#include "core.h"
/* constants */
#define TX_URB_COUNT 32
#define RX_URB_COUNT 32
#define ATH6KL_USB_RX_BUFFER_SIZE 1700
/* tx/rx pipes for usb */
enum ATH6KL_USB_PIPE_ID {
ATH6KL_USB_PIPE_TX_CTRL = 0,
ATH6KL_USB_PIPE_TX_DATA_LP,
ATH6KL_USB_PIPE_TX_DATA_MP,
ATH6KL_USB_PIPE_TX_DATA_HP,
ATH6KL_USB_PIPE_RX_CTRL,
ATH6KL_USB_PIPE_RX_DATA,
ATH6KL_USB_PIPE_RX_DATA2,
ATH6KL_USB_PIPE_RX_INT,
ATH6KL_USB_PIPE_MAX
};
#define ATH6KL_USB_PIPE_INVALID ATH6KL_USB_PIPE_MAX
/* per logical pipe state (one per USB endpoint in use) */
struct ath6kl_usb_pipe {
	struct list_head urb_list_head;		/* free urb-context list */
	struct usb_anchor urb_submitted;	/* anchors in-flight urbs */
	u32 urb_alloc;		/* total contexts allocated for this pipe */
	u32 urb_cnt;		/* contexts currently on the free list */
	u32 urb_cnt_thresh;	/* presumably a refill threshold -- confirm */
	unsigned int usb_pipe_handle;	/* from usb_{snd,rcv}{bulk,int}pipe() */
	u32 flags;		/* ATH6KL_USB_PIPE_FLAG_* */
	u8 ep_address;		/* USB endpoint address */
	u8 logical_pipe_num;	/* ATH6KL_USB_PIPE_* id */
	struct ath6kl_usb *ar_usb;	/* owning device; NULL while unused */
	u16 max_packet_size;	/* endpoint wMaxPacketSize */
	struct work_struct io_complete_work;
	struct sk_buff_head io_comp_queue;
	struct usb_endpoint_descriptor *ep_desc;
};
#define ATH6KL_USB_PIPE_FLAG_TX (1 << 0)
/* usb device object */
/* usb device object */
struct ath6kl_usb {
	/* protects pipe->urb_list_head and pipe->urb_cnt */
	spinlock_t cs_lock;

	struct usb_device *udev;		/* core USB device */
	struct usb_interface *interface;	/* bound USB interface */
	struct ath6kl_usb_pipe pipes[ATH6KL_USB_PIPE_MAX];
	u8 *diag_cmd_buffer;	/* presumably for BMI/diag control requests */
	u8 *diag_resp_buffer;	/* presumably for BMI/diag responses */
	struct ath6kl *ar;	/* back-pointer to core driver state */
};
/* usb urb object */
/* usb urb object: per-transfer context recycled via the pipe free list */
struct ath6kl_urb_context {
	struct list_head link;		/* entry in pipe->urb_list_head */
	struct ath6kl_usb_pipe *pipe;	/* owning pipe */
	struct sk_buff *skb;		/* data buffer for the transfer */
	struct ath6kl *ar;
};
/* USB endpoint definitions */
#define ATH6KL_USB_EP_ADDR_APP_CTRL_IN 0x81
#define ATH6KL_USB_EP_ADDR_APP_DATA_IN 0x82
#define ATH6KL_USB_EP_ADDR_APP_DATA2_IN 0x83
#define ATH6KL_USB_EP_ADDR_APP_INT_IN 0x84
#define ATH6KL_USB_EP_ADDR_APP_CTRL_OUT 0x01
#define ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT 0x02
#define ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT 0x03
#define ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT 0x04
/* diagnostic command defnitions */
#define ATH6KL_USB_CONTROL_REQ_SEND_BMI_CMD 1
#define ATH6KL_USB_CONTROL_REQ_RECV_BMI_RESP 2
......@@ -55,11 +117,493 @@ struct ath6kl_usb_ctrl_diag_resp_read {
__le32 value;
} __packed;
/* function declarations */
static void ath6kl_usb_recv_complete(struct urb *urb);
#define ATH6KL_USB_IS_BULK_EP(attr) (((attr) & 3) == 0x02)
#define ATH6KL_USB_IS_INT_EP(attr) (((attr) & 3) == 0x03)
#define ATH6KL_USB_IS_ISOC_EP(attr) (((attr) & 3) == 0x01)
#define ATH6KL_USB_IS_DIR_IN(addr) ((addr) & 0x80)
/* pipe/urb operations */
/*
 * Take a urb context off the pipe's free list, or NULL if the pool is
 * exhausted.  Counterpart of ath6kl_usb_free_urb_to_pipe().
 */
static struct ath6kl_urb_context *
ath6kl_usb_alloc_urb_from_pipe(struct ath6kl_usb_pipe *pipe)
{
	struct ath6kl_urb_context *ctx = NULL;
	unsigned long flags;

	spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);

	if (!list_empty(&pipe->urb_list_head)) {
		ctx = list_first_entry(&pipe->urb_list_head,
				       struct ath6kl_urb_context, link);
		list_del(&ctx->link);
		pipe->urb_cnt--;
	}

	spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);

	return ctx;
}
/* return a urb context to the pipe's free list */
static void ath6kl_usb_free_urb_to_pipe(struct ath6kl_usb_pipe *pipe,
					struct ath6kl_urb_context *urb_context)
{
	unsigned long flags;

	spin_lock_irqsave(&pipe->ar_usb->cs_lock, flags);

	list_add(&urb_context->link, &pipe->urb_list_head);
	pipe->urb_cnt++;

	spin_unlock_irqrestore(&pipe->ar_usb->cs_lock, flags);
}
/* drop any attached skb and recycle the urb context to its pipe */
static void ath6kl_usb_cleanup_recv_urb(struct ath6kl_urb_context *urb_context)
{
	struct sk_buff *skb = urb_context->skb;

	if (skb != NULL) {
		urb_context->skb = NULL;
		dev_kfree_skb(skb);
	}

	ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);
}
/* return the USB HIF private data attached to the core driver instance */
static inline struct ath6kl_usb *ath6kl_usb_priv(struct ath6kl *ar)
{
	return ar->hif_priv;
}
/* pipe resource allocation/cleanup */
static int ath6kl_usb_alloc_pipe_resources(struct ath6kl_usb_pipe *pipe,
int urb_cnt)
{
struct ath6kl_urb_context *urb_context;
int status = 0, i;
INIT_LIST_HEAD(&pipe->urb_list_head);
init_usb_anchor(&pipe->urb_submitted);
for (i = 0; i < urb_cnt; i++) {
urb_context = kzalloc(sizeof(struct ath6kl_urb_context),
GFP_KERNEL);
if (urb_context == NULL)
/* FIXME: set status to -ENOMEM */
break;
urb_context->pipe = pipe;
/*
* we are only allocate the urb contexts here, the actual URB
* is allocated from the kernel as needed to do a transaction
*/
pipe->urb_alloc++;
ath6kl_usb_free_urb_to_pipe(pipe, urb_context);
}
ath6kl_dbg(ATH6KL_DBG_USB,
"ath6kl usb: alloc resources lpipe:%d hpipe:0x%X urbs:%d\n",
pipe->logical_pipe_num, pipe->usb_pipe_handle,
pipe->urb_alloc);
return status;
}
/*
 * Free every urb context held by @pipe.  Logs a warning when contexts
 * are still outstanding (urb leak).  Fixes the debug messages, whose
 * adjacent string literals concatenated with no separator between
 * "lpipe:%d" and "hpipe:...".
 */
static void ath6kl_usb_free_pipe_resources(struct ath6kl_usb_pipe *pipe)
{
	struct ath6kl_urb_context *urb_context;

	if (pipe->ar_usb == NULL) {
		/* nothing allocated for this pipe */
		return;
	}

	ath6kl_dbg(ATH6KL_DBG_USB,
		   "ath6kl usb: free resources lpipe:%d hpipe:0x%X urbs:%d avail:%d\n",
		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
		   pipe->urb_alloc, pipe->urb_cnt);

	if (pipe->urb_alloc != pipe->urb_cnt) {
		ath6kl_dbg(ATH6KL_DBG_USB,
			   "ath6kl usb: urb leak! lpipe:%d hpipe:0x%X urbs:%d avail:%d\n",
			   pipe->logical_pipe_num, pipe->usb_pipe_handle,
			   pipe->urb_alloc, pipe->urb_cnt);
	}

	while (true) {
		urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);
		if (urb_context == NULL)
			break;
		kfree(urb_context);
	}
}
static void ath6kl_usb_cleanup_pipe_resources(struct ath6kl_usb *ar_usb)
{
int i;
for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++)
ath6kl_usb_free_pipe_resources(&ar_usb->pipes[i]);
}
/*
 * Map a USB endpoint address onto the driver's logical pipe id and the
 * number of urb contexts to pre-allocate for it.  Returns
 * ATH6KL_USB_PIPE_INVALID (leaving *urb_count untouched) for endpoints
 * the driver does not use.
 */
static u8 ath6kl_usb_get_logical_pipe_num(struct ath6kl_usb *ar_usb,
					  u8 ep_address, int *urb_count)
{
	u8 pipe_num;
	int count;

	switch (ep_address) {
	case ATH6KL_USB_EP_ADDR_APP_CTRL_IN:
		pipe_num = ATH6KL_USB_PIPE_RX_CTRL;
		count = RX_URB_COUNT;
		break;
	case ATH6KL_USB_EP_ADDR_APP_DATA_IN:
		pipe_num = ATH6KL_USB_PIPE_RX_DATA;
		count = RX_URB_COUNT;
		break;
	case ATH6KL_USB_EP_ADDR_APP_INT_IN:
		pipe_num = ATH6KL_USB_PIPE_RX_INT;
		count = RX_URB_COUNT;
		break;
	case ATH6KL_USB_EP_ADDR_APP_DATA2_IN:
		pipe_num = ATH6KL_USB_PIPE_RX_DATA2;
		count = RX_URB_COUNT;
		break;
	case ATH6KL_USB_EP_ADDR_APP_CTRL_OUT:
		pipe_num = ATH6KL_USB_PIPE_TX_CTRL;
		count = TX_URB_COUNT;
		break;
	case ATH6KL_USB_EP_ADDR_APP_DATA_LP_OUT:
		pipe_num = ATH6KL_USB_PIPE_TX_DATA_LP;
		count = TX_URB_COUNT;
		break;
	case ATH6KL_USB_EP_ADDR_APP_DATA_MP_OUT:
		pipe_num = ATH6KL_USB_PIPE_TX_DATA_MP;
		count = TX_URB_COUNT;
		break;
	case ATH6KL_USB_EP_ADDR_APP_DATA_HP_OUT:
		pipe_num = ATH6KL_USB_PIPE_TX_DATA_HP;
		count = TX_URB_COUNT;
		break;
	default:
		/* note: there may be endpoints not currently used */
		return ATH6KL_USB_PIPE_INVALID;
	}

	*urb_count = count;
	return pipe_num;
}
/*
 * Walk the interface's endpoint descriptors and bind each recognized
 * endpoint to its logical pipe: record the endpoint address and max
 * packet size, build the usb pipe handle matching the endpoint's
 * transfer type and direction, and pre-allocate the pipe's urb pool.
 * Returns 0 on success or the first urb-pool allocation error.
 */
static int ath6kl_usb_setup_pipe_resources(struct ath6kl_usb *ar_usb)
{
	struct usb_interface *interface = ar_usb->interface;
	struct usb_host_interface *iface_desc = interface->cur_altsetting;
	struct usb_endpoint_descriptor *endpoint;
	struct ath6kl_usb_pipe *pipe;
	int i, urbcount, status = 0;
	u8 pipe_num;

	ath6kl_dbg(ATH6KL_DBG_USB, "setting up USB Pipes using interface\n");

	/* walk descriptors and setup pipes */
	for (i = 0; i < iface_desc->desc.bNumEndpoints; ++i) {
		endpoint = &iface_desc->endpoint[i].desc;

		/* debug-only dump of the endpoint type and parameters */
		if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
			ath6kl_dbg(ATH6KL_DBG_USB,
				   "%s Bulk Ep:0x%2.2X maxpktsz:%d\n",
				   ATH6KL_USB_IS_DIR_IN
				   (endpoint->bEndpointAddress) ?
				   "RX" : "TX", endpoint->bEndpointAddress,
				   le16_to_cpu(endpoint->wMaxPacketSize));
		} else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
			ath6kl_dbg(ATH6KL_DBG_USB,
				   "%s Int Ep:0x%2.2X maxpktsz:%d interval:%d\n",
				   ATH6KL_USB_IS_DIR_IN
				   (endpoint->bEndpointAddress) ?
				   "RX" : "TX", endpoint->bEndpointAddress,
				   le16_to_cpu(endpoint->wMaxPacketSize),
				   endpoint->bInterval);
		} else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
			/* TODO for ISO */
			ath6kl_dbg(ATH6KL_DBG_USB,
				   "%s ISOC Ep:0x%2.2X maxpktsz:%d interval:%d\n",
				   ATH6KL_USB_IS_DIR_IN
				   (endpoint->bEndpointAddress) ?
				   "RX" : "TX", endpoint->bEndpointAddress,
				   le16_to_cpu(endpoint->wMaxPacketSize),
				   endpoint->bInterval);
		}
		urbcount = 0;

		pipe_num =
		    ath6kl_usb_get_logical_pipe_num(ar_usb,
						    endpoint->bEndpointAddress,
						    &urbcount);
		/* skip endpoints the driver has no logical pipe for */
		if (pipe_num == ATH6KL_USB_PIPE_INVALID)
			continue;

		pipe = &ar_usb->pipes[pipe_num];
		if (pipe->ar_usb != NULL) {
			/* hmmm..pipe was already setup */
			continue;
		}

		/* a non-NULL ar_usb marks the pipe as in use from here on */
		pipe->ar_usb = ar_usb;
		pipe->logical_pipe_num = pipe_num;
		pipe->ep_address = endpoint->bEndpointAddress;
		pipe->max_packet_size = le16_to_cpu(endpoint->wMaxPacketSize);

		/* build the usb pipe handle for this type and direction */
		if (ATH6KL_USB_IS_BULK_EP(endpoint->bmAttributes)) {
			if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
				pipe->usb_pipe_handle =
				    usb_rcvbulkpipe(ar_usb->udev,
						    pipe->ep_address);
			} else {
				pipe->usb_pipe_handle =
				    usb_sndbulkpipe(ar_usb->udev,
						    pipe->ep_address);
			}
		} else if (ATH6KL_USB_IS_INT_EP(endpoint->bmAttributes)) {
			if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
				pipe->usb_pipe_handle =
				    usb_rcvintpipe(ar_usb->udev,
						   pipe->ep_address);
			} else {
				pipe->usb_pipe_handle =
				    usb_sndintpipe(ar_usb->udev,
						   pipe->ep_address);
			}
		} else if (ATH6KL_USB_IS_ISOC_EP(endpoint->bmAttributes)) {
			/* TODO for ISO */
			if (ATH6KL_USB_IS_DIR_IN(pipe->ep_address)) {
				pipe->usb_pipe_handle =
				    usb_rcvisocpipe(ar_usb->udev,
						    pipe->ep_address);
			} else {
				pipe->usb_pipe_handle =
				    usb_sndisocpipe(ar_usb->udev,
						    pipe->ep_address);
			}
		}
		pipe->ep_desc = endpoint;

		if (!ATH6KL_USB_IS_DIR_IN(pipe->ep_address))
			pipe->flags |= ATH6KL_USB_PIPE_FLAG_TX;

		status = ath6kl_usb_alloc_pipe_resources(pipe, urbcount);
		if (status != 0)
			break;
	}

	return status;
}
/* pipe operations */
/*
 * Keep posting bulk-in urbs on @recv_pipe until its free-urb-context
 * pool is exhausted. Each urb gets a fresh skb of @buffer_length bytes;
 * completion is handled by ath6kl_usb_recv_complete(). On an allocation
 * or submit failure the current context is recycled and posting stops.
 */
static void ath6kl_usb_post_recv_transfers(struct ath6kl_usb_pipe *recv_pipe,
					   int buffer_length)
{
	struct ath6kl_urb_context *urb_context;
	struct urb *urb;
	int usb_status;

	while (true) {
		urb_context = ath6kl_usb_alloc_urb_from_pipe(recv_pipe);
		if (urb_context == NULL)
			break;

		urb_context->skb = dev_alloc_skb(buffer_length);
		if (urb_context->skb == NULL)
			goto err_cleanup_urb;

		urb = usb_alloc_urb(0, GFP_ATOMIC);
		if (urb == NULL)
			goto err_cleanup_urb;

		usb_fill_bulk_urb(urb,
				  recv_pipe->ar_usb->udev,
				  recv_pipe->usb_pipe_handle,
				  urb_context->skb->data,
				  buffer_length,
				  ath6kl_usb_recv_complete, urb_context);

		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
			   "ath6kl usb: bulk recv submit:%d, 0x%X (ep:0x%2.2X), %d bytes buf:0x%p\n",
			   recv_pipe->logical_pipe_num,
			   recv_pipe->usb_pipe_handle, recv_pipe->ep_address,
			   buffer_length, urb_context->skb);

		/* anchor so ath6kl_usb_flush_all() can kill in-flight urbs */
		usb_anchor_urb(urb, &recv_pipe->urb_submitted);
		usb_status = usb_submit_urb(urb, GFP_ATOMIC);

		if (usb_status) {
			ath6kl_dbg(ATH6KL_DBG_USB_BULK,
				   "ath6kl usb : usb bulk recv failed %d\n",
				   usb_status);
			usb_unanchor_urb(urb);
			usb_free_urb(urb);
			goto err_cleanup_urb;
		}
		/* drop our local reference; the USB core holds its own */
		usb_free_urb(urb);
	}
	return;

err_cleanup_urb:
	/* frees the skb (if any) and returns the context to the pool */
	ath6kl_usb_cleanup_recv_urb(urb_context);
	return;
}
static void ath6kl_usb_flush_all(struct ath6kl_usb *ar_usb)
{
int i;
for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
if (ar_usb->pipes[i].ar_usb != NULL)
usb_kill_anchored_urbs(&ar_usb->pipes[i].urb_submitted);
}
/*
* Flushing any pending I/O may schedule work this call will block
* until all scheduled work runs to completion.
*/
flush_scheduled_work();
}
static void ath6kl_usb_start_recv_pipes(struct ath6kl_usb *ar_usb)
{
/*
* note: control pipe is no longer used
* ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_cnt_thresh =
* ar_usb->pipes[ATH6KL_USB_PIPE_RX_CTRL].urb_alloc/2;
* ath6kl_usb_post_recv_transfers(&ar_usb->
* pipes[ATH6KL_USB_PIPE_RX_CTRL],
* ATH6KL_USB_RX_BUFFER_SIZE);
*/
ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_cnt_thresh =
ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA].urb_alloc / 2;
ath6kl_usb_post_recv_transfers(&ar_usb->pipes[ATH6KL_USB_PIPE_RX_DATA],
ATH6KL_USB_RX_BUFFER_SIZE);
}
/* hif usb rx/tx completion functions */
/*
 * Bulk-in urb completion handler. Queues the received skb for deferred
 * processing by the pipe's io-completion work, recycles the urb context,
 * and reposts receive urbs once the free pool crosses the threshold.
 */
static void ath6kl_usb_recv_complete(struct urb *urb)
{
	struct ath6kl_urb_context *urb_context = urb->context;
	struct ath6kl_usb_pipe *pipe = urb_context->pipe;
	struct sk_buff *skb = NULL;
	int status = 0;

	ath6kl_dbg(ATH6KL_DBG_USB_BULK,
		   "%s: recv pipe: %d, stat:%d, len:%d urb:0x%p\n", __func__,
		   pipe->logical_pipe_num, urb->status, urb->actual_length,
		   urb);

	if (urb->status != 0) {
		status = -EIO;
		switch (urb->status) {
		case -ECONNRESET:
		case -ENOENT:
		case -ESHUTDOWN:
			/*
			 * no need to spew these errors when device
			 * removed or urb killed due to driver shutdown
			 */
			status = -ECANCELED;
			break;
		default:
			ath6kl_dbg(ATH6KL_DBG_USB_BULK,
				   "%s recv pipe: %d (ep:0x%2.2X), failed:%d\n",
				   __func__, pipe->logical_pipe_num,
				   pipe->ep_address, urb->status);
			break;
		}
		goto cleanup_recv_urb;
	}

	/* nothing received; just recycle the context */
	if (urb->actual_length == 0)
		goto cleanup_recv_urb;

	skb = urb_context->skb;

	/* we are going to pass it up */
	urb_context->skb = NULL;
	skb_put(skb, urb->actual_length);

	/* note: queue implements a lock */
	skb_queue_tail(&pipe->io_comp_queue, skb);
	schedule_work(&pipe->io_complete_work);

cleanup_recv_urb:
	ath6kl_usb_cleanup_recv_urb(urb_context);

	/* only repost on clean completions (status stays 0 above) */
	if (status == 0 &&
	    pipe->urb_cnt >= pipe->urb_cnt_thresh) {
		/* our free urbs are piling up, post more transfers */
		ath6kl_usb_post_recv_transfers(pipe, ATH6KL_USB_RX_BUFFER_SIZE);
	}
}
/*
 * Bulk-out urb completion handler: log errors, return the urb context
 * to the pipe's free pool, and defer the skb's tx-complete processing
 * to the pipe's io-completion work item.
 */
static void ath6kl_usb_usb_transmit_complete(struct urb *urb)
{
	struct ath6kl_urb_context *urb_context = urb->context;
	struct ath6kl_usb_pipe *pipe = urb_context->pipe;
	struct sk_buff *skb;

	ath6kl_dbg(ATH6KL_DBG_USB_BULK,
		   "%s: pipe: %d, stat:%d, len:%d\n",
		   __func__, pipe->logical_pipe_num, urb->status,
		   urb->actual_length);

	if (urb->status != 0) {
		/* errors are only logged; the skb is still completed below */
		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
			   "%s: pipe: %d, failed:%d\n",
			   __func__, pipe->logical_pipe_num, urb->status);
	}

	skb = urb_context->skb;
	urb_context->skb = NULL;
	ath6kl_usb_free_urb_to_pipe(urb_context->pipe, urb_context);

	/* note: queue implements a lock */
	skb_queue_tail(&pipe->io_comp_queue, skb);
	schedule_work(&pipe->io_complete_work);
}
static void ath6kl_usb_io_comp_work(struct work_struct *work)
{
struct ath6kl_usb_pipe *pipe = container_of(work,
struct ath6kl_usb_pipe,
io_complete_work);
struct ath6kl_usb *ar_usb;
struct sk_buff *skb;
ar_usb = pipe->ar_usb;
while ((skb = skb_dequeue(&pipe->io_comp_queue))) {
if (pipe->flags & ATH6KL_USB_PIPE_FLAG_TX) {
ath6kl_dbg(ATH6KL_DBG_USB_BULK,
"ath6kl usb xmit callback buf:0x%p\n", skb);
ath6kl_core_tx_complete(ar_usb->ar, skb);
} else {
ath6kl_dbg(ATH6KL_DBG_USB_BULK,
"ath6kl usb recv callback buf:0x%p\n", skb);
ath6kl_core_rx_complete(ar_usb->ar, skb,
pipe->logical_pipe_num);
}
}
}
#define ATH6KL_USB_MAX_DIAG_CMD (sizeof(struct ath6kl_usb_ctrl_diag_cmd_write))
#define ATH6KL_USB_MAX_DIAG_RESP (sizeof(struct ath6kl_usb_ctrl_diag_resp_read))
static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
{
ath6kl_usb_flush_all(ar_usb);
ath6kl_usb_cleanup_pipe_resources(ar_usb);
usb_set_intfdata(ar_usb->interface, NULL);
kfree(ar_usb->diag_cmd_buffer);
......@@ -70,19 +614,28 @@ static void ath6kl_usb_destroy(struct ath6kl_usb *ar_usb)
static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
{
struct ath6kl_usb *ar_usb = NULL;
struct usb_device *dev = interface_to_usbdev(interface);
struct ath6kl_usb *ar_usb;
struct ath6kl_usb_pipe *pipe;
int status = 0;
int i;
ar_usb = kzalloc(sizeof(struct ath6kl_usb), GFP_KERNEL);
if (ar_usb == NULL)
goto fail_ath6kl_usb_create;
memset(ar_usb, 0, sizeof(struct ath6kl_usb));
usb_set_intfdata(interface, ar_usb);
spin_lock_init(&(ar_usb->cs_lock));
ar_usb->udev = dev;
ar_usb->interface = interface;
for (i = 0; i < ATH6KL_USB_PIPE_MAX; i++) {
pipe = &ar_usb->pipes[i];
INIT_WORK(&pipe->io_complete_work,
ath6kl_usb_io_comp_work);
skb_queue_head_init(&pipe->io_comp_queue);
}
ar_usb->diag_cmd_buffer = kzalloc(ATH6KL_USB_MAX_DIAG_CMD, GFP_KERNEL);
if (ar_usb->diag_cmd_buffer == NULL) {
status = -ENOMEM;
......@@ -96,6 +649,8 @@ static struct ath6kl_usb *ath6kl_usb_create(struct usb_interface *interface)
goto fail_ath6kl_usb_create;
}
status = ath6kl_usb_setup_pipe_resources(ar_usb);
fail_ath6kl_usb_create:
if (status != 0) {
ath6kl_usb_destroy(ar_usb);
......@@ -114,11 +669,177 @@ static void ath6kl_usb_device_detached(struct usb_interface *interface)
ath6kl_stop_txrx(ar_usb->ar);
/* Delay to wait for the target to reboot */
mdelay(20);
ath6kl_core_cleanup(ar_usb->ar);
ath6kl_usb_destroy(ar_usb);
}
/* exported hif usb APIs for htc pipe */
static void hif_start(struct ath6kl *ar)
{
struct ath6kl_usb *device = ath6kl_usb_priv(ar);
int i;
ath6kl_usb_start_recv_pipes(device);
/* set the TX resource avail threshold for each TX pipe */
for (i = ATH6KL_USB_PIPE_TX_CTRL;
i <= ATH6KL_USB_PIPE_TX_DATA_HP; i++) {
device->pipes[i].urb_cnt_thresh =
device->pipes[i].urb_alloc / 2;
}
}
/*
 * HIF pipe_send callback: queue @skb for bulk-out transfer on logical
 * pipe @PipeID. @hdr_skb is not used by the USB HIF. Completion is
 * reported asynchronously via ath6kl_usb_usb_transmit_complete().
 * Returns 0 on success or a negative errno.
 */
static int ath6kl_usb_send(struct ath6kl *ar, u8 PipeID,
			   struct sk_buff *hdr_skb, struct sk_buff *skb)
{
	struct ath6kl_usb *device = ath6kl_usb_priv(ar);
	struct ath6kl_usb_pipe *pipe = &device->pipes[PipeID];
	struct ath6kl_urb_context *urb_context;
	int usb_status, status = 0;
	struct urb *urb;
	u8 *data;
	u32 len;

	ath6kl_dbg(ATH6KL_DBG_USB_BULK, "+%s pipe : %d, buf:0x%p\n",
		   __func__, PipeID, skb);

	urb_context = ath6kl_usb_alloc_urb_from_pipe(pipe);

	if (urb_context == NULL) {
		/*
		 * TODO: it is possible to run out of urbs if
		 * 2 endpoints map to the same pipe ID
		 */
		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
			   "%s pipe:%d no urbs left. URB Cnt : %d\n",
			   __func__, PipeID, pipe->urb_cnt);
		status = -ENOMEM;
		goto fail_hif_send;
	}

	urb_context->skb = skb;

	data = skb->data;
	len = skb->len;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (urb == NULL) {
		status = -ENOMEM;
		/* return the context so it can be reused */
		ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
					    urb_context);
		goto fail_hif_send;
	}

	usb_fill_bulk_urb(urb,
			  device->udev,
			  pipe->usb_pipe_handle,
			  data,
			  len,
			  ath6kl_usb_usb_transmit_complete, urb_context);

	if ((len % pipe->max_packet_size) == 0) {
		/* hit a max packet boundary on this pipe */
		urb->transfer_flags |= URB_ZERO_PACKET;
	}

	ath6kl_dbg(ATH6KL_DBG_USB_BULK,
		   "athusb bulk send submit:%d, 0x%X (ep:0x%2.2X), %d bytes\n",
		   pipe->logical_pipe_num, pipe->usb_pipe_handle,
		   pipe->ep_address, len);

	/* anchor so ath6kl_usb_flush_all() can cancel in-flight tx urbs */
	usb_anchor_urb(urb, &pipe->urb_submitted);
	usb_status = usb_submit_urb(urb, GFP_ATOMIC);

	if (usb_status) {
		ath6kl_dbg(ATH6KL_DBG_USB_BULK,
			   "ath6kl usb : usb bulk transmit failed %d\n",
			   usb_status);
		usb_unanchor_urb(urb);
		ath6kl_usb_free_urb_to_pipe(urb_context->pipe,
					    urb_context);
		status = -EINVAL;
	}
	/* drop our local reference; on success the USB core holds its own */
	usb_free_urb(urb);

fail_hif_send:
	return status;
}
/* HIF stop callback: cancel all outstanding USB traffic. */
static void hif_stop(struct ath6kl *ar)
{
	ath6kl_usb_flush_all(ath6kl_usb_priv(ar));
}
/* HIF callback: the control endpoints are the default ul/dl pipes. */
static void ath6kl_usb_get_default_pipe(struct ath6kl *ar,
					u8 *ul_pipe, u8 *dl_pipe)
{
	*dl_pipe = ATH6KL_USB_PIPE_RX_CTRL;
	*ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
}
/*
 * HIF callback: map an HTC service id to its uplink/downlink pipes.
 * Every supported service currently receives on the primary RX data
 * pipe; rxdata2 is disabled unless firmware enables it. For control
 * services the downlink is also the data pipe, due to large control
 * packets. Returns 0 on success, -EPERM for unknown services (in which
 * case neither output is written).
 */
static int ath6kl_usb_map_service_pipe(struct ath6kl *ar, u16 svc_id,
				       u8 *ul_pipe, u8 *dl_pipe)
{
	switch (svc_id) {
	case HTC_CTRL_RSVD_SVC:
	case WMI_CONTROL_SVC:
		*ul_pipe = ATH6KL_USB_PIPE_TX_CTRL;
		break;
	case WMI_DATA_BE_SVC:
	case WMI_DATA_BK_SVC:
		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_LP;
		break;
	case WMI_DATA_VI_SVC:
		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_MP;
		break;
	case WMI_DATA_VO_SVC:
		*ul_pipe = ATH6KL_USB_PIPE_TX_DATA_HP;
		break;
	default:
		return -EPERM;
	}

	*dl_pipe = ATH6KL_USB_PIPE_RX_DATA;

	return 0;
}
/* HIF flow control: number of free tx urb contexts left on @pipe_id. */
static u16 ath6kl_usb_get_free_queue_number(struct ath6kl *ar, u8 pipe_id)
{
	return ath6kl_usb_priv(ar)->pipes[pipe_id].urb_cnt;
}
/* HIF detach callback: cancel all outstanding USB traffic. */
static void hif_detach_htc(struct ath6kl *ar)
{
	ath6kl_usb_flush_all(ath6kl_usb_priv(ar));
}
static int ath6kl_usb_submit_ctrl_out(struct ath6kl_usb *ar_usb,
u8 req, u16 value, u16 index, void *data,
u32 size)
......@@ -301,14 +1022,21 @@ static int ath6kl_usb_bmi_write(struct ath6kl *ar, u8 *buf, u32 len)
/* HIF power_on: no real power control over USB, just start the HIF. */
static int ath6kl_usb_power_on(struct ath6kl *ar)
{
	hif_start(ar);

	return 0;
}
/* HIF power_off: no real power control over USB, just detach from HTC. */
static int ath6kl_usb_power_off(struct ath6kl *ar)
{
	hif_detach_htc(ar);

	return 0;
}
/* HIF stop entry point; delegates to the internal stop handler. */
static void ath6kl_usb_stop(struct ath6kl *ar)
{
	hif_stop(ar);
}
static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.diag_read32 = ath6kl_usb_diag_read32,
.diag_write32 = ath6kl_usb_diag_write32,
......@@ -316,6 +1044,11 @@ static const struct ath6kl_hif_ops ath6kl_usb_ops = {
.bmi_write = ath6kl_usb_bmi_write,
.power_on = ath6kl_usb_power_on,
.power_off = ath6kl_usb_power_off,
.stop = ath6kl_usb_stop,
.pipe_send = ath6kl_usb_send,
.pipe_get_default = ath6kl_usb_get_default_pipe,
.pipe_map_service = ath6kl_usb_map_service_pipe,
.pipe_get_free_queue_number = ath6kl_usb_get_free_queue_number,
};
/* ath6kl usb driver registered functions */
......@@ -368,7 +1101,7 @@ static int ath6kl_usb_probe(struct usb_interface *interface,
ar_usb->ar = ar;
ret = ath6kl_core_init(ar);
ret = ath6kl_core_init(ar, ATH6KL_HTC_TYPE_PIPE);
if (ret) {
ath6kl_err("Failed to init ath6kl core: %d\n", ret);
goto err_core_free;
......@@ -392,6 +1125,46 @@ static void ath6kl_usb_remove(struct usb_interface *interface)
ath6kl_usb_device_detached(interface);
}
#ifdef CONFIG_PM
/* PM suspend: cancel every outstanding urb before the bus suspends. */
static int ath6kl_usb_suspend(struct usb_interface *interface,
			      pm_message_t message)
{
	struct ath6kl_usb *device = usb_get_intfdata(interface);

	ath6kl_usb_flush_all(device);

	return 0;
}
/* PM resume: re-prime both RX data pipes with receive urbs. */
static int ath6kl_usb_resume(struct usb_interface *interface)
{
	struct ath6kl_usb *device = usb_get_intfdata(interface);
	struct ath6kl_usb_pipe *pipes = device->pipes;

	ath6kl_usb_post_recv_transfers(&pipes[ATH6KL_USB_PIPE_RX_DATA],
				       ATH6KL_USB_RX_BUFFER_SIZE);
	ath6kl_usb_post_recv_transfers(&pipes[ATH6KL_USB_PIPE_RX_DATA2],
				       ATH6KL_USB_RX_BUFFER_SIZE);

	return 0;
}
/* PM reset-resume: tear the instance down if it is still registered. */
static int ath6kl_usb_reset_resume(struct usb_interface *intf)
{
	if (usb_get_intfdata(intf))
		ath6kl_usb_remove(intf);

	return 0;
}
#else
#define ath6kl_usb_suspend NULL
#define ath6kl_usb_resume NULL
#define ath6kl_usb_reset_resume NULL
#endif
/* table of devices that work with this driver */
static struct usb_device_id ath6kl_usb_ids[] = {
{USB_DEVICE(0x0cf3, 0x9374)},
......@@ -403,8 +1176,12 @@ MODULE_DEVICE_TABLE(usb, ath6kl_usb_ids);
/*
 * USB driver registration data. The PM callbacks are defined to NULL
 * when CONFIG_PM is not set (see the #else branch above their
 * definitions).
 */
static struct usb_driver ath6kl_usb_driver = {
	.name = "ath6kl_usb",
	.probe = ath6kl_usb_probe,
	.suspend = ath6kl_usb_suspend,
	.resume = ath6kl_usb_resume,
	.reset_resume = ath6kl_usb_reset_resume,
	.disconnect = ath6kl_usb_remove,
	.id_table = ath6kl_usb_ids,
	.supports_autosuspend = true,
};
static int ath6kl_usb_init(void)
......
......@@ -2882,6 +2882,43 @@ int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
return ret;
}
/*
 * Send WMI_SET_HT_CAP_CMDID: translate the host's HT capability info
 * (struct ath6kl_htcap) into the firmware's per-band HT settings.
 * Returns 0 on success or a negative errno.
 */
int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
			     enum ieee80211_band band,
			     struct ath6kl_htcap *htcap)
{
	struct sk_buff *skb;
	struct wmi_set_htcap_cmd *cmd;

	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
	if (!skb)
		return -ENOMEM;

	cmd = (struct wmi_set_htcap_cmd *) skb->data;

	/*
	 * NOTE: Band in firmware matches enum ieee80211_band, it is unlikely
	 * this will be changed in firmware. If at all there is any change in
	 * band value, the host needs to be fixed.
	 */
	cmd->band = band;
	/* the !! normalize the capability bits to 0/1 byte flags */
	cmd->ht_enable = !!htcap->ht_enable;
	cmd->ht20_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_20);
	cmd->ht40_supported =
		!!(htcap->cap_info & IEEE80211_HT_CAP_SUP_WIDTH_20_40);
	cmd->ht40_sgi = !!(htcap->cap_info & IEEE80211_HT_CAP_SGI_40);
	cmd->intolerant_40mhz =
		!!(htcap->cap_info & IEEE80211_HT_CAP_40MHZ_INTOLERANT);
	cmd->max_ampdu_len_exp = htcap->ampdu_factor;

	ath6kl_dbg(ATH6KL_DBG_WMI,
		   "Set htcap: band:%d ht_enable:%d 40mhz:%d sgi_20mhz:%d sgi_40mhz:%d 40mhz_intolerant:%d ampdu_len_exp:%d\n",
		   cmd->band, cmd->ht_enable, cmd->ht40_supported,
		   cmd->ht20_sgi, cmd->ht40_sgi, cmd->intolerant_40mhz,
		   cmd->max_ampdu_len_exp);

	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_HT_CAP_CMDID,
				   NO_SYNC_WMIFLAG);
}
int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len)
{
struct sk_buff *skb;
......@@ -3032,6 +3069,9 @@ int ath6kl_wmi_ap_set_mlme(struct wmi *wmip, u8 if_idx, u8 cmd, const u8 *mac,
cm->reason = cpu_to_le16(reason);
cm->cmd = cmd;
ath6kl_dbg(ATH6KL_DBG_WMI, "ap_set_mlme: cmd=%d reason=%d\n", cm->cmd,
cm->reason);
return ath6kl_wmi_cmd_send(wmip, if_idx, skb, WMI_AP_SET_MLME_CMDID,
NO_SYNC_WMIFLAG);
}
......@@ -3181,6 +3221,29 @@ int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
NO_SYNC_WMIFLAG);
}
/*
 * Send WMI_SET_IE_CMDID: program @ie_len bytes of information-element
 * data into the target. @ie_field selects which portion of the IE is
 * addressed (enum wmi_ie_field_type). A NULL @ie_info or zero @ie_len
 * sends the command with no IE payload.
 * Returns 0 on success or a negative errno.
 */
int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
			  const u8 *ie_info, u8 ie_len)
{
	struct sk_buff *skb;
	struct wmi_set_ie_cmd *p;

	skb = ath6kl_wmi_get_new_buf(sizeof(*p) + ie_len);
	if (!skb)
		return -ENOMEM;

	/* debug label fixed: was the garbled "ie_ie_field" */
	ath6kl_dbg(ATH6KL_DBG_WMI, "set_ie_cmd: ie_id=%u ie_field=%u ie_len=%u\n",
		   ie_id, ie_field, ie_len);

	p = (struct wmi_set_ie_cmd *) skb->data;
	p->ie_id = ie_id;
	p->ie_field = ie_field;
	p->ie_len = ie_len;
	if (ie_info && ie_len > 0)
		memcpy(p->ie_info, ie_info, ie_len);

	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_SET_IE_CMDID,
				   NO_SYNC_WMIFLAG);
}
int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable)
{
struct sk_buff *skb;
......@@ -3392,6 +3455,23 @@ int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx)
WMI_CANCEL_REMAIN_ON_CHNL_CMDID);
}
/*
 * Send WMI_AP_CONN_INACT_CMDID: configure the AP-mode station
 * inactivity period (num_null_func is left at zero).
 * Returns 0 on success or a negative errno.
 */
int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout)
{
	struct wmi_set_inact_period_cmd *cmd;
	struct sk_buff *skb;

	skb = ath6kl_wmi_get_new_buf(sizeof(*cmd));
	if (skb == NULL)
		return -ENOMEM;

	cmd = (struct wmi_set_inact_period_cmd *) skb->data;
	cmd->inact_period = cpu_to_le32(inact_timeout);
	cmd->num_null_func = 0;

	return ath6kl_wmi_cmd_send(wmi, if_idx, skb, WMI_AP_CONN_INACT_CMDID,
				   NO_SYNC_WMIFLAG);
}
static int ath6kl_wmi_control_rx_xtnd(struct wmi *wmi, struct sk_buff *skb)
{
struct wmix_cmd_hdr *cmd;
......
......@@ -182,6 +182,9 @@ enum wmi_data_hdr_flags {
#define WMI_DATA_HDR_META_MASK 0x7
#define WMI_DATA_HDR_META_SHIFT 13
#define WMI_DATA_HDR_PAD_BEFORE_DATA_MASK 0xFF
#define WMI_DATA_HDR_PAD_BEFORE_DATA_SHIFT 0x8
/* Macros for operating on WMI_DATA_HDR (info3) field */
#define WMI_DATA_HDR_IF_IDX_MASK 0xF
......@@ -423,6 +426,7 @@ enum wmi_cmd_id {
WMI_SET_FRAMERATES_CMDID,
WMI_SET_AP_PS_CMDID,
WMI_SET_QOS_SUPP_CMDID,
WMI_SET_IE_CMDID,
/* WMI_THIN_RESERVED_... mark the start and end
* values for WMI_THIN_RESERVED command IDs. These
......@@ -629,6 +633,11 @@ enum wmi_mgmt_frame_type {
WMI_NUM_MGMT_FRAME
};
/* Selects which portion of an IE a WMI_SET_IE_CMDID targets. */
enum wmi_ie_field_type {
	WMI_RSN_IE_CAPB	= 0x1,
	WMI_IE_FULL	= 0xFF,  /* indicates full IE */
};
/* WMI_CONNECT_CMDID */
enum network_type {
INFRA_NETWORK = 0x01,
......@@ -1268,6 +1277,16 @@ struct wmi_mcast_filter_add_del_cmd {
u8 mcast_mac[ATH6KL_MCAST_FILTER_MAC_ADDR_SIZE];
} __packed;
/*
 * Payload of WMI_SET_HT_CAP_CMDID; filled by ath6kl_wmi_set_htcap_cmd().
 * All flag fields carry 0/1 values.
 */
struct wmi_set_htcap_cmd {
	u8 band;	/* matches enum ieee80211_band (per firmware note) */
	u8 ht_enable;
	u8 ht40_supported;
	u8 ht20_sgi;
	u8 ht40_sgi;
	u8 intolerant_40mhz;
	u8 max_ampdu_len_exp;	/* from htcap->ampdu_factor */
} __packed;
/* Command Replies */
/* WMI_GET_CHANNEL_LIST_CMDID reply */
......@@ -1913,6 +1932,14 @@ struct wmi_set_appie_cmd {
u8 ie_info[0];
} __packed;
/* Payload of WMI_SET_IE_CMDID; filled by ath6kl_wmi_set_ie_cmd(). */
struct wmi_set_ie_cmd {
	u8 ie_id;
	u8 ie_field;	/* enum wmi_ie_field_type */
	u8 ie_len;
	u8 reserved;
	u8 ie_info[0];	/* ie_len bytes of IE data follow */
} __packed;
/* Notify the WSC registration status to the target */
#define WSC_REG_ACTIVE 1
#define WSC_REG_INACTIVE 0
......@@ -2141,6 +2168,11 @@ struct wmi_ap_hidden_ssid_cmd {
u8 hidden_ssid;
} __packed;
/* Payload of WMI_AP_CONN_INACT_CMDID; see ath6kl_wmi_set_inact_period(). */
struct wmi_set_inact_period_cmd {
	__le32 inact_period;
	u8 num_null_func;	/* currently always sent as 0 */
} __packed;
/* AP mode events */
struct wmi_ap_set_apsd_cmd {
u8 enable;
......@@ -2465,6 +2497,9 @@ int ath6kl_wmi_get_roam_tbl_cmd(struct wmi *wmi);
int ath6kl_wmi_set_wmm_txop(struct wmi *wmi, u8 if_idx, enum wmi_txop_cfg cfg);
int ath6kl_wmi_set_keepalive_cmd(struct wmi *wmi, u8 if_idx,
u8 keep_alive_intvl);
int ath6kl_wmi_set_htcap_cmd(struct wmi *wmi, u8 if_idx,
enum ieee80211_band band,
struct ath6kl_htcap *htcap);
int ath6kl_wmi_test_cmd(struct wmi *wmi, void *buf, size_t len);
s32 ath6kl_wmi_get_rate(s8 rate_index);
......@@ -2515,6 +2550,9 @@ int ath6kl_wmi_set_rx_frame_format_cmd(struct wmi *wmi, u8 if_idx,
int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
const u8 *ie, u8 ie_len);
int ath6kl_wmi_set_ie_cmd(struct wmi *wmi, u8 if_idx, u8 ie_id, u8 ie_field,
const u8 *ie_info, u8 ie_len);
/* P2P */
int ath6kl_wmi_disable_11b_rates_cmd(struct wmi *wmi, bool disable);
......@@ -2538,6 +2576,8 @@ int ath6kl_wmi_cancel_remain_on_chnl_cmd(struct wmi *wmi, u8 if_idx);
int ath6kl_wmi_set_appie_cmd(struct wmi *wmi, u8 if_idx, u8 mgmt_frm_type,
const u8 *ie, u8 ie_len);
int ath6kl_wmi_set_inact_period(struct wmi *wmi, u8 if_idx, int inact_timeout);
void ath6kl_wmi_sscan_timer(unsigned long ptr);
struct ath6kl_vif *ath6kl_get_vif_by_index(struct ath6kl *ar, u8 if_idx);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment