Commit 48c6cc04 authored by John W. Linville
parents ba23d206 abae2386
@@ -3695,7 +3695,7 @@ struct iwl_bt_uart_msg {
 	u8 frame5;
 	u8 frame6;
 	u8 frame7;
-} __attribute__((packed));
+} __packed;
 
 struct iwl_bt_coex_profile_notif {
 	struct iwl_bt_uart_msg last_bt_uart_msg;
@@ -3703,7 +3703,7 @@ struct iwl_bt_coex_profile_notif {
 	u8 bt_traffic_load;	/* 0 .. 3? */
 	u8 bt_ci_compliance;	/* 0 - not complied, 1 - complied */
 	u8 reserved;
-} __attribute__((packed));
+} __packed;
 
 #define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_POS	0
 #define IWL_BT_COEX_PRIO_TBL_SHARED_ANTENNA_MSK	0x1
@@ -3752,7 +3752,7 @@ enum bt_coex_prio_table_priorities {
 
 struct iwl_bt_coex_prio_table_cmd {
 	u8 prio_tbl[BT_COEX_PRIO_TBL_EVT_MAX];
-} __attribute__((packed));
+} __packed;
 
 #define IWL_BT_COEX_ENV_CLOSE	0
 #define IWL_BT_COEX_ENV_OPEN	1
@@ -3764,7 +3764,7 @@ struct iwl_bt_coex_prot_env_cmd {
 	u8 action;	/* 0 = closed, 1 = open */
 	u8 type;	/* 0 .. 15 */
 	u8 reserved[2];
-} __attribute__((packed));
+} __packed;
 
 /*
  * REPLY_D3_CONFIG
......
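Note: `__packed` is the kernel's shorthand macro for `__attribute__((packed))` (defined in the kernel's compiler headers), so the hunks above are purely cosmetic; the on-wire layout of these host command structures is unchanged. A minimal sketch of the equivalence, with hypothetical struct names:

	/* Both structs have the same packed 5-byte layout; only the
	 * attribute spelling differs. */
	struct example_a {
		u8  flag;
		u32 value;
	} __attribute__((packed));

	struct example_b {
		u8  flag;
		u32 value;
	} __packed;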
@@ -157,7 +157,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 	sram = priv->dbgfs_sram_offset & ~0x3;
 
 	/* read the first u32 from sram */
-	val = iwl_read_targ_mem(priv->trans, sram);
+	val = iwl_trans_read_mem32(priv->trans, sram);
 
 	for (; len; len--) {
 		/* put the address at the start of every line */
@@ -176,7 +176,7 @@ static ssize_t iwl_dbgfs_sram_read(struct file *file,
 		if (++offset == 4) {
 			sram += 4;
 			offset = 0;
-			val = iwl_read_targ_mem(priv->trans, sram);
+			val = iwl_trans_read_mem32(priv->trans, sram);
 		}
 
 		/* put in extra spaces and split lines for human readability */
......
@@ -69,7 +69,7 @@ static const struct ieee80211_tpt_blink iwl_blink[] = {
 /* Set led register off */
 void iwlagn_led_enable(struct iwl_priv *priv)
 {
-	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
+	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);
 }
 
 /*
......
@@ -459,11 +459,11 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
 		base = priv->device_pointers.error_event_table;
 		if (iwlagn_hw_valid_rtc_data_addr(base)) {
 			spin_lock_irqsave(&priv->trans->reg_lock, flags);
-			ret = iwl_grab_nic_access_silent(priv->trans);
-			if (likely(ret == 0)) {
+			if (iwl_trans_grab_nic_access(priv->trans, true)) {
 				iwl_write32(priv->trans, HBUS_TARG_MEM_RADDR, base);
 				status = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
-				iwl_release_nic_access(priv->trans);
+				iwl_trans_release_nic_access(priv->trans);
+				ret = 0;
 			}
 			spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
@@ -479,7 +479,7 @@ static int iwlagn_mac_resume(struct ieee80211_hw *hw)
 	}
 
 	if (priv->wowlan_sram)
-		_iwl_read_targ_mem_dwords(
+		iwl_trans_read_mem(
 			priv->trans, 0x800000,
 			priv->wowlan_sram,
 			img->sec[IWL_UCODE_SECTION_DATA].len / 4);
......
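Note: the resume path above is the one caller in this commit that passes silent = true to the new iwl_trans_grab_nic_access(), so a wake-up timeout during WoWLAN resume does not trigger a WARN_ONCE splat. The caller pattern the new API expects, assembled from the hunks in this commit (not new driver code):

	spin_lock_irqsave(&trans->reg_lock, flags);
	if (iwl_trans_grab_nic_access(trans, true)) {	/* silent on timeout */
		val = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
		iwl_trans_release_nic_access(trans);
	}
	spin_unlock_irqrestore(&trans->reg_lock, flags);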
@@ -354,7 +354,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
 
 	/* Make sure device is powered up for SRAM reads */
 	spin_lock_irqsave(&priv->trans->reg_lock, reg_flags);
-	if (unlikely(!iwl_grab_nic_access(priv->trans))) {
+	if (!iwl_trans_grab_nic_access(priv->trans, false)) {
 		spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
 		return;
 	}
@@ -388,7 +388,7 @@ static void iwl_print_cont_event_trace(struct iwl_priv *priv, u32 base,
 		}
 	}
 	/* Allow device to power down */
-	iwl_release_nic_access(priv->trans);
+	iwl_trans_release_nic_access(priv->trans);
 	spin_unlock_irqrestore(&priv->trans->reg_lock, reg_flags);
 }
 
@@ -408,7 +408,8 @@ static void iwl_continuous_event_trace(struct iwl_priv *priv)
 	base = priv->device_pointers.log_event_table;
 	if (iwlagn_hw_valid_rtc_data_addr(base)) {
-		iwl_read_targ_mem_bytes(priv->trans, base, &read, sizeof(read));
+		iwl_trans_read_mem_bytes(priv->trans, base,
+					 &read, sizeof(read));
 		capacity = read.capacity;
 		mode = read.mode;
 		num_wraps = read.wrap_counter;
@@ -1627,7 +1628,7 @@ static void iwl_dump_nic_error_log(struct iwl_priv *priv)
 	}
 
 	/*TODO: Update dbgfs with ISR error stats obtained below */
-	iwl_read_targ_mem_bytes(trans, base, &table, sizeof(table));
+	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));
 
 	if (ERROR_START_OFFSET <= table.valid * ERROR_ELEM_SIZE) {
 		IWL_ERR(trans, "Start IWL Error Log Dump:\n");
@@ -1717,7 +1718,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
 
 	/* Make sure device is powered up for SRAM reads */
 	spin_lock_irqsave(&trans->reg_lock, reg_flags);
-	if (unlikely(!iwl_grab_nic_access(trans)))
+	if (!iwl_trans_grab_nic_access(trans, false))
 		goto out_unlock;
 
 	/* Set starting address; reads will auto-increment */
@@ -1756,7 +1757,7 @@ static int iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
 	}
 
 	/* Allow device to power down */
-	iwl_release_nic_access(trans);
+	iwl_trans_release_nic_access(trans);
 out_unlock:
 	spin_unlock_irqrestore(&trans->reg_lock, reg_flags);
 	return pos;
@@ -1835,10 +1836,10 @@ int iwl_dump_nic_event_log(struct iwl_priv *priv, bool full_log,
 	}
 
 	/* event log header */
-	capacity = iwl_read_targ_mem(trans, base);
-	mode = iwl_read_targ_mem(trans, base + (1 * sizeof(u32)));
-	num_wraps = iwl_read_targ_mem(trans, base + (2 * sizeof(u32)));
-	next_entry = iwl_read_targ_mem(trans, base + (3 * sizeof(u32)));
+	capacity = iwl_trans_read_mem32(trans, base);
+	mode = iwl_trans_read_mem32(trans, base + (1 * sizeof(u32)));
+	num_wraps = iwl_trans_read_mem32(trans, base + (2 * sizeof(u32)));
+	next_entry = iwl_trans_read_mem32(trans, base + (3 * sizeof(u32)));
 
 	if (capacity > logsize) {
 		IWL_ERR(priv, "Log capacity %d is bogus, limit to %d "
......
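Note: the event-log header is four consecutive SRAM dwords, so the four iwl_trans_read_mem32() calls above each pay a full NIC grab/release cycle. A hypothetical alternative using the bulk helper this commit introduces (one wake-up, one read burst):

	u32 hdr[4];

	/* iwl_trans_read_mem() returns 0 on success, -EBUSY if the
	 * NIC could not be woken (see the pcie implementation below). */
	if (!iwl_trans_read_mem(trans, base, hdr, ARRAY_SIZE(hdr))) {
		capacity   = hdr[0];
		mode       = hdr[1];
		num_wraps  = hdr[2];
		next_entry = hdr[3];
	}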
@@ -186,8 +186,8 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
 		}
 		iwl_read32(priv->trans, CSR_UCODE_DRV_GP1);
 		spin_lock_irqsave(&priv->trans->reg_lock, flags);
-		if (likely(iwl_grab_nic_access(priv->trans)))
-			iwl_release_nic_access(priv->trans);
+		if (iwl_trans_grab_nic_access(priv->trans, false))
+			iwl_trans_release_nic_access(priv->trans);
 		spin_unlock_irqrestore(&priv->trans->reg_lock, flags);
 
 		/* Reschedule the ct_kill timer to occur in
......
@@ -541,9 +541,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	spin_lock_bh(&priv->sta_lock);
 
 	tid_data = &priv->tid_data[sta_id][tid];
-	txq_id = priv->tid_data[sta_id][tid].agg.txq_id;
+	txq_id = tid_data->agg.txq_id;
 
-	switch (priv->tid_data[sta_id][tid].agg.state) {
+	switch (tid_data->agg.state) {
 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
 		/*
 		 * This can happen if the peer stops aggregation
@@ -563,9 +563,9 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	case IWL_AGG_ON:
 		break;
 	default:
-		IWL_WARN(priv, "Stopping AGG while state not ON "
-			 "or starting for %d on %d (%d)\n", sta_id, tid,
-			 priv->tid_data[sta_id][tid].agg.state);
+		IWL_WARN(priv,
+			 "Stopping AGG while state not ON or starting for %d on %d (%d)\n",
+			 sta_id, tid, tid_data->agg.state);
 		spin_unlock_bh(&priv->sta_lock);
 		return 0;
 	}
@@ -578,12 +578,11 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 				    "stopping AGG on STA/TID %d/%d but hwq %d not used\n",
 				    sta_id, tid, txq_id);
 	} else if (tid_data->agg.ssn != tid_data->next_reclaimed) {
-		IWL_DEBUG_TX_QUEUES(priv, "Can't proceed: ssn %d, "
-				    "next_recl = %d\n",
+		IWL_DEBUG_TX_QUEUES(priv,
+				    "Can't proceed: ssn %d, next_recl = %d\n",
 				    tid_data->agg.ssn,
 				    tid_data->next_reclaimed);
-		priv->tid_data[sta_id][tid].agg.state =
-			IWL_EMPTYING_HW_QUEUE_DELBA;
+		tid_data->agg.state = IWL_EMPTYING_HW_QUEUE_DELBA;
 		spin_unlock_bh(&priv->sta_lock);
 		return 0;
 	}
@@ -591,8 +590,8 @@ int iwlagn_tx_agg_stop(struct iwl_priv *priv, struct ieee80211_vif *vif,
 	IWL_DEBUG_TX_QUEUES(priv, "Can proceed: ssn = next_recl = %d\n",
 			    tid_data->agg.ssn);
 turn_off:
-	agg_state = priv->tid_data[sta_id][tid].agg.state;
-	priv->tid_data[sta_id][tid].agg.state = IWL_AGG_OFF;
+	agg_state = tid_data->agg.state;
+	tid_data->agg.state = IWL_AGG_OFF;
 
 	spin_unlock_bh(&priv->sta_lock);
......
@@ -286,89 +286,6 @@ static int iwl_alive_notify(struct iwl_priv *priv)
 	return iwl_send_calib_results(priv);
 }
 
-/**
- * iwl_verify_inst_sparse - verify runtime uCode image in card vs. host,
- *   using sample data 100 bytes apart.  If these sample points are good,
- *   it's a pretty good bet that everything between them is good, too.
- */
-static int iwl_verify_sec_sparse(struct iwl_priv *priv,
-				 const struct fw_desc *fw_desc)
-{
-	__le32 *image = (__le32 *)fw_desc->data;
-	u32 len = fw_desc->len;
-	u32 val;
-	u32 i;
-
-	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
-	for (i = 0; i < len; i += 100, image += 100/sizeof(u32)) {
-		/* read data comes through single port, auto-incr addr */
-		/* NOTE: Use the debugless read so we don't flood kernel log
-		 * if IWL_DL_IO is set */
-		iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
-				   i + fw_desc->offset);
-		val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image))
-			return -EIO;
-	}
-
-	return 0;
-}
-
-static void iwl_print_mismatch_sec(struct iwl_priv *priv,
-				   const struct fw_desc *fw_desc)
-{
-	__le32 *image = (__le32 *)fw_desc->data;
-	u32 len = fw_desc->len;
-	u32 val;
-	u32 offs;
-	int errors = 0;
-
-	IWL_DEBUG_FW(priv, "ucode inst image size is %u\n", len);
-
-	iwl_write_direct32(priv->trans, HBUS_TARG_MEM_RADDR,
-			   fw_desc->offset);
-
-	for (offs = 0;
-	     offs < len && errors < 20;
-	     offs += sizeof(u32), image++) {
-		/* read data comes through single port, auto-incr addr */
-		val = iwl_read32(priv->trans, HBUS_TARG_MEM_RDAT);
-		if (val != le32_to_cpu(*image)) {
-			IWL_ERR(priv, "uCode INST section at "
-				"offset 0x%x, is 0x%x, s/b 0x%x\n",
-				offs, val, le32_to_cpu(*image));
-			errors++;
-		}
-	}
-}
-
-/**
- * iwl_verify_ucode - determine which instruction image is in SRAM,
- *    and verify its contents
- */
-static int iwl_verify_ucode(struct iwl_priv *priv,
-			    enum iwl_ucode_type ucode_type)
-{
-	const struct fw_img *img = iwl_get_ucode_image(priv, ucode_type);
-
-	if (!img) {
-		IWL_ERR(priv, "Invalid ucode requested (%d)\n", ucode_type);
-		return -EINVAL;
-	}
-
-	if (!iwl_verify_sec_sparse(priv, &img->sec[IWL_UCODE_SECTION_INST])) {
-		IWL_DEBUG_FW(priv, "uCode is good in inst SRAM\n");
-		return 0;
-	}
-
-	IWL_ERR(priv, "UCODE IMAGE IN INSTRUCTION SRAM NOT VALID!!\n");
-
-	iwl_print_mismatch_sec(priv, &img->sec[IWL_UCODE_SECTION_INST]);
-	return -EIO;
-}
-
 struct iwl_alive_data {
 	bool valid;
 	u8 subtype;
@@ -426,7 +343,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
 				   alive_cmd, ARRAY_SIZE(alive_cmd),
 				   iwl_alive_fn, &alive_data);
 
-	ret = iwl_trans_start_fw(priv->trans, fw);
+	ret = iwl_trans_start_fw(priv->trans, fw, false);
 	if (ret) {
 		priv->cur_ucode = old_type;
 		iwl_remove_notification(&priv->notif_wait, &alive_wait);
@@ -450,18 +367,7 @@ int iwl_load_ucode_wait_alive(struct iwl_priv *priv,
 		return -EIO;
 	}
 
-	/*
-	 * This step takes a long time (60-80ms!!) and
-	 * WoWLAN image should be loaded quickly, so
-	 * skip it for WoWLAN.
-	 */
 	if (ucode_type != IWL_UCODE_WOWLAN) {
-		ret = iwl_verify_ucode(priv, ucode_type);
-		if (ret) {
-			priv->cur_ucode = old_type;
-			return ret;
-		}
-
 		/* delay a bit to give rfkill time to run */
 		msleep(5);
 	}
......
@@ -381,8 +381,8 @@
 
 /* LED */
 #define CSR_LED_BSM_CTRL_MSK (0xFFFFFFDF)
-#define CSR_LED_REG_TRUN_ON (0x78)
-#define CSR_LED_REG_TRUN_OFF (0x38)
+#define CSR_LED_REG_TURN_ON (0x60)
+#define CSR_LED_REG_TURN_OFF (0x20)
 
 /* ANA_PLL */
 #define CSR50_ANA_PLL_CFG_VAL (0x00880300)
......
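Note: this is more than a spelling fix. Besides renaming CSR_LED_REG_TRUN_* to the correctly spelled CSR_LED_REG_TURN_*, the register values themselves change (0x78 -> 0x60 for on, 0x38 -> 0x20 for off). Usage stays as in the led.c hunk earlier in this commit; the off case would look symmetric (illustrative sketch, not taken from this diff):

	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_ON);	/* 0x60 */
	iwl_write32(priv->trans, CSR_LED_REG, CSR_LED_REG_TURN_OFF);	/* 0x20 */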
@@ -35,12 +35,12 @@
 
 #define IWL_POLL_INTERVAL 10	/* microseconds */
 
-static inline void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask)
 {
 	iwl_write32(trans, reg, iwl_read32(trans, reg) | mask);
 }
 
-static inline void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
+void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask)
 {
 	iwl_write32(trans, reg, iwl_read32(trans, reg) & ~mask);
 }
@@ -99,86 +99,16 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
 }
 EXPORT_SYMBOL_GPL(iwl_poll_bit);
 
-int iwl_grab_nic_access_silent(struct iwl_trans *trans)
-{
-	int ret;
-
-	lockdep_assert_held(&trans->reg_lock);
-
-	/* this bit wakes up the NIC */
-	__iwl_set_bit(trans, CSR_GP_CNTRL,
-		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
-	/*
-	 * These bits say the device is running, and should keep running for
-	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
-	 * but they do not indicate that embedded SRAM is restored yet;
-	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
-	 * to/from host DRAM when sleeping/waking for power-saving.
-	 * Each direction takes approximately 1/4 millisecond; with this
-	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
-	 * series of register accesses are expected (e.g. reading Event Log),
-	 * to keep device from sleeping.
-	 *
-	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
-	 * SRAM is okay/restored.  We don't check that here because this call
-	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
-	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
-	 *
-	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
-	 * and do not save/restore SRAM when power cycling.
-	 */
-	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
-			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
-			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
-			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
-	if (ret < 0) {
-		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
-		return -EIO;
-	}
-
-	return 0;
-}
-EXPORT_SYMBOL_GPL(iwl_grab_nic_access_silent);
-
-bool iwl_grab_nic_access(struct iwl_trans *trans)
-{
-	int ret = iwl_grab_nic_access_silent(trans);
-
-	if (unlikely(ret)) {
-		u32 val = iwl_read32(trans, CSR_GP_CNTRL);
-		WARN_ONCE(1, "Timeout waiting for hardware access "
-			     "(CSR_GP_CNTRL 0x%08x)\n", val);
-		return false;
-	}
-
-	return true;
-}
-EXPORT_SYMBOL_GPL(iwl_grab_nic_access);
-
-void iwl_release_nic_access(struct iwl_trans *trans)
-{
-	lockdep_assert_held(&trans->reg_lock);
-	__iwl_clear_bit(trans, CSR_GP_CNTRL,
-			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-	/*
-	 * Above we read the CSR_GP_CNTRL register, which will flush
-	 * any previous writes, but we need the write that clears the
-	 * MAC_ACCESS_REQ bit to be performed before any other writes
-	 * scheduled on different CPUs (after we drop reg_lock).
-	 */
-	mmiowb();
-}
-EXPORT_SYMBOL_GPL(iwl_release_nic_access);
-
 u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg)
 {
-	u32 value;
+	u32 value = 0x5a5a5a5a;
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	iwl_grab_nic_access(trans);
-	value = iwl_read32(trans, reg);
-	iwl_release_nic_access(trans);
+	if (iwl_trans_grab_nic_access(trans, false)) {
+		value = iwl_read32(trans, reg);
+		iwl_trans_release_nic_access(trans);
+	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 
 	return value;
@@ -190,9 +120,9 @@ void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		iwl_write32(trans, reg, value);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
@@ -230,12 +160,13 @@ static inline void __iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
 u32 iwl_read_prph(struct iwl_trans *trans, u32 ofs)
 {
 	unsigned long flags;
-	u32 val;
+	u32 val = 0x5a5a5a5a;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	iwl_grab_nic_access(trans);
-	val = __iwl_read_prph(trans, ofs);
-	iwl_release_nic_access(trans);
+	if (iwl_trans_grab_nic_access(trans, false)) {
+		val = __iwl_read_prph(trans, ofs);
+		iwl_trans_release_nic_access(trans);
+	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 	return val;
 }
@@ -246,9 +177,9 @@ void iwl_write_prph(struct iwl_trans *trans, u32 ofs, u32 val)
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		__iwl_write_prph(trans, ofs, val);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
@@ -259,10 +190,10 @@ void iwl_set_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		__iwl_write_prph(trans, ofs,
 				 __iwl_read_prph(trans, ofs) | mask);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
@@ -274,10 +205,10 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
 	unsigned long flags;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		__iwl_write_prph(trans, ofs,
 				 (__iwl_read_prph(trans, ofs) & mask) | bits);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
@@ -289,66 +220,11 @@ void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask)
 	u32 val;
 
 	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
+	if (iwl_trans_grab_nic_access(trans, false)) {
 		val = __iwl_read_prph(trans, ofs);
 		__iwl_write_prph(trans, ofs, (val & ~mask));
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 	}
 	spin_unlock_irqrestore(&trans->reg_lock, flags);
 }
 EXPORT_SYMBOL_GPL(iwl_clear_bits_prph);
-
-void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       void *buf, int dwords)
-{
-	unsigned long flags;
-	int offs;
-	u32 *vals = buf;
-
-	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
-		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
-		for (offs = 0; offs < dwords; offs++)
-			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
-		iwl_release_nic_access(trans);
-	}
-	spin_unlock_irqrestore(&trans->reg_lock, flags);
-}
-EXPORT_SYMBOL_GPL(_iwl_read_targ_mem_dwords);
-
-u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr)
-{
-	u32 value;
-
-	_iwl_read_targ_mem_dwords(trans, addr, &value, 1);
-
-	return value;
-}
-EXPORT_SYMBOL_GPL(iwl_read_targ_mem);
-
-int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       const void *buf, int dwords)
-{
-	unsigned long flags;
-	int offs, result = 0;
-	const u32 *vals = buf;
-
-	spin_lock_irqsave(&trans->reg_lock, flags);
-	if (likely(iwl_grab_nic_access(trans))) {
-		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
-		for (offs = 0; offs < dwords; offs++)
-			iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
-		iwl_release_nic_access(trans);
-	} else
-		result = -EBUSY;
-	spin_unlock_irqrestore(&trans->reg_lock, flags);
-
-	return result;
-}
-EXPORT_SYMBOL_GPL(_iwl_write_targ_mem_dwords);
-
-int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val)
-{
-	return _iwl_write_targ_mem_dwords(trans, addr, &val, 1);
-}
-EXPORT_SYMBOL_GPL(iwl_write_targ_mem);
@@ -53,6 +53,8 @@ static inline u32 iwl_read32(struct iwl_trans *trans, u32 ofs)
 
 void iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
 void iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+void __iwl_set_bit(struct iwl_trans *trans, u32 reg, u32 mask);
+void __iwl_clear_bit(struct iwl_trans *trans, u32 reg, u32 mask);
 
 void iwl_set_bits_mask(struct iwl_trans *trans, u32 reg, u32 mask, u32 value);
 
@@ -61,10 +63,6 @@ int iwl_poll_bit(struct iwl_trans *trans, u32 addr,
 int iwl_poll_direct_bit(struct iwl_trans *trans, u32 addr, u32 mask,
 			int timeout);
 
-int iwl_grab_nic_access_silent(struct iwl_trans *trans);
-bool iwl_grab_nic_access(struct iwl_trans *trans);
-void iwl_release_nic_access(struct iwl_trans *trans);
-
 u32 iwl_read_direct32(struct iwl_trans *trans, u32 reg);
 void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value);
 
@@ -76,19 +74,4 @@ void iwl_set_bits_mask_prph(struct iwl_trans *trans, u32 ofs,
 			    u32 bits, u32 mask);
 void iwl_clear_bits_prph(struct iwl_trans *trans, u32 ofs, u32 mask);
 
-void _iwl_read_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       void *buf, int dwords);
-
-#define iwl_read_targ_mem_bytes(trans, addr, buf, bufsize)	\
-	do {							\
-		BUILD_BUG_ON((bufsize) % sizeof(u32));		\
-		_iwl_read_targ_mem_dwords(trans, addr, buf,	\
-					  (bufsize) / sizeof(u32));\
-	} while (0)
-
-int _iwl_write_targ_mem_dwords(struct iwl_trans *trans, u32 addr,
-			       const void *buf, int dwords);
-
-u32 iwl_read_targ_mem(struct iwl_trans *trans, u32 addr);
-int iwl_write_targ_mem(struct iwl_trans *trans, u32 addr, u32 val);
-
 #endif
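Note: the dword-multiple size check moves with the macro. iwl_trans_read_mem_bytes() (added in iwl-trans.h below) keeps the BUILD_BUG_ON from the old iwl_read_targ_mem_bytes(), but now guards it with __builtin_constant_p() so callers may also pass a runtime size. Typical use, taken from the main.c hunk earlier in this commit:

	struct iwl_error_event_table table;

	/* sizeof(table) is a compile-time constant, so the
	 * BUILD_BUG_ON((bufsize) % sizeof(u32)) check still fires
	 * if the struct is ever not a multiple of 4 bytes. */
	iwl_trans_read_mem_bytes(trans, base, &table, sizeof(table));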
@@ -467,18 +467,20 @@ static int iwl_test_indirect_read(struct iwl_test *tst, u32 addr, u32 size)
 	if (IWL_ABS_PRPH_START <= addr &&
 	    addr < IWL_ABS_PRPH_START + PRPH_END) {
 		spin_lock_irqsave(&trans->reg_lock, flags);
-		iwl_grab_nic_access(trans);
+		if (!iwl_trans_grab_nic_access(trans, false)) {
+			spin_unlock_irqrestore(&trans->reg_lock, flags);
+			return -EIO;
+		}
 		iwl_write32(trans, HBUS_TARG_PRPH_RADDR,
 			    addr | (3 << 24));
 		for (i = 0; i < size; i += 4)
 			*(u32 *)(tst->mem.addr + i) =
 				iwl_read32(trans, HBUS_TARG_PRPH_RDAT);
-		iwl_release_nic_access(trans);
+		iwl_trans_release_nic_access(trans);
 		spin_unlock_irqrestore(&trans->reg_lock, flags);
 	} else { /* target memory (SRAM) */
-		_iwl_read_targ_mem_dwords(trans, addr,
-					  tst->mem.addr,
-					  tst->mem.size / 4);
+		iwl_trans_read_mem(trans, addr, tst->mem.addr,
+				   tst->mem.size / 4);
 	}
 
 	tst->mem.nchunks =
@@ -501,28 +503,31 @@ static int iwl_test_indirect_write(struct iwl_test *tst, u32 addr,
 	if (IWL_ABS_PRPH_START <= addr &&
 	    addr < IWL_ABS_PRPH_START + PRPH_END) {
 		/* Periphery writes can be 1-3 bytes long, or DWORDs */
 		if (size < 4) {
 			memcpy(&val, buf, size);
 			spin_lock_irqsave(&trans->reg_lock, flags);
-			iwl_grab_nic_access(trans);
+			if (!iwl_trans_grab_nic_access(trans, false)) {
+				spin_unlock_irqrestore(&trans->reg_lock, flags);
+				return -EIO;
+			}
 			iwl_write32(trans, HBUS_TARG_PRPH_WADDR,
 				    (addr & 0x0000FFFF) |
 				    ((size - 1) << 24));
 			iwl_write32(trans, HBUS_TARG_PRPH_WDAT, val);
-			iwl_release_nic_access(trans);
+			iwl_trans_release_nic_access(trans);
 			/* needed after consecutive writes w/o read */
 			mmiowb();
 			spin_unlock_irqrestore(&trans->reg_lock, flags);
 		} else {
 			if (size % 4)
 				return -EINVAL;
 			for (i = 0; i < size; i += 4)
 				iwl_write_prph(trans, addr+i,
 					       *(u32 *)(buf+i));
 		}
 	} else if (iwl_test_valid_hw_addr(tst, addr)) {
-		_iwl_write_targ_mem_dwords(trans, addr, buf, size / 4);
+		iwl_trans_write_mem(trans, addr, buf, size / 4);
 	} else {
 		return -EINVAL;
 	}
......
@@ -321,6 +321,8 @@ static inline struct page *rxb_steal_page(struct iwl_rx_cmd_buffer *r)
 * @n_no_reclaim_cmds: # of commands in list
 * @rx_buf_size_8k: 8 kB RX buffer size needed for A-MSDUs,
 *	if unset 4k will be the RX buffer size
+ * @bc_table_dword: set to true if the BC table expects the byte count to be
+ *	in DWORD (as opposed to bytes)
 * @queue_watchdog_timeout: time (in ms) after which queues
 *	are considered stuck and will trigger device restart
 * @command_names: array of command names, must be 256 entries
@@ -335,6 +337,7 @@ struct iwl_trans_config {
 	int n_no_reclaim_cmds;
 
 	bool rx_buf_size_8k;
+	bool bc_table_dword;
 	unsigned int queue_watchdog_timeout;
 	const char **command_names;
 };
@@ -387,16 +390,21 @@ struct iwl_trans;
 * @read32: read a u32 register at offset ofs from the BAR
 * @read_prph: read a DWORD from a periphery register
 * @write_prph: write a DWORD to a periphery register
+ * @read_mem: read device's SRAM in DWORD
+ * @write_mem: write device's SRAM in DWORD
 * @configure: configure parameters required by the transport layer from
 *	the op_mode. May be called several times before start_fw, can't be
 *	called after that.
 * @set_pmi: set the power pmi state
+ * @grab_nic_access: wake the NIC to be able to access non-HBUS regs
+ * @release_nic_access: let the NIC go to sleep
 */
 struct iwl_trans_ops {
 	int (*start_hw)(struct iwl_trans *iwl_trans);
 	void (*stop_hw)(struct iwl_trans *iwl_trans, bool op_mode_leaving);
-	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw);
+	int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
+			bool run_in_rfkill);
 	void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
 	void (*stop_device)(struct iwl_trans *trans);
@@ -424,9 +432,15 @@ struct iwl_trans_ops {
 	u32 (*read32)(struct iwl_trans *trans, u32 ofs);
 	u32 (*read_prph)(struct iwl_trans *trans, u32 ofs);
 	void (*write_prph)(struct iwl_trans *trans, u32 ofs, u32 val);
+	int (*read_mem)(struct iwl_trans *trans, u32 addr,
+			void *buf, int dwords);
+	int (*write_mem)(struct iwl_trans *trans, u32 addr,
+			 void *buf, int dwords);
 	void (*configure)(struct iwl_trans *trans,
 			  const struct iwl_trans_config *trans_cfg);
 	void (*set_pmi)(struct iwl_trans *trans, bool state);
+	bool (*grab_nic_access)(struct iwl_trans *trans, bool silent);
+	void (*release_nic_access)(struct iwl_trans *trans);
 };
 
@@ -528,13 +542,14 @@ static inline void iwl_trans_fw_alive(struct iwl_trans *trans, u32 scd_addr)
 }
 
 static inline int iwl_trans_start_fw(struct iwl_trans *trans,
-				     const struct fw_img *fw)
+				     const struct fw_img *fw,
+				     bool run_in_rfkill)
 {
 	might_sleep();
 
 	WARN_ON_ONCE(!trans->rx_mpdu_cmd);
 
-	return trans->ops->start_fw(trans, fw);
+	return trans->ops->start_fw(trans, fw, run_in_rfkill);
 }
 
 static inline void iwl_trans_stop_device(struct iwl_trans *trans)
@@ -636,7 +651,7 @@ static inline int iwl_trans_wait_tx_queue_empty(struct iwl_trans *trans)
 }
 
 static inline int iwl_trans_dbgfs_register(struct iwl_trans *trans,
-					    struct dentry *dir)
+					   struct dentry *dir)
 {
 	return trans->ops->dbgfs_register(trans, dir);
 }
@@ -679,11 +694,57 @@ static inline void iwl_trans_write_prph(struct iwl_trans *trans, u32 ofs,
 	return trans->ops->write_prph(trans, ofs, val);
 }
 
+static inline int iwl_trans_read_mem(struct iwl_trans *trans, u32 addr,
+				     void *buf, int dwords)
+{
+	return trans->ops->read_mem(trans, addr, buf, dwords);
+}
+
+#define iwl_trans_read_mem_bytes(trans, addr, buf, bufsize)		      \
+	do {								      \
+		if (__builtin_constant_p(bufsize))			      \
+			BUILD_BUG_ON((bufsize) % sizeof(u32));		      \
+		iwl_trans_read_mem(trans, addr, buf, (bufsize) / sizeof(u32));\
+	} while (0)
+
+static inline u32 iwl_trans_read_mem32(struct iwl_trans *trans, u32 addr)
+{
+	u32 value;
+
+	if (WARN_ON(iwl_trans_read_mem(trans, addr, &value, 1)))
+		return 0xa5a5a5a5;
+
+	return value;
+}
+
+static inline int iwl_trans_write_mem(struct iwl_trans *trans, u32 addr,
+				      void *buf, int dwords)
+{
+	return trans->ops->write_mem(trans, addr, buf, dwords);
+}
+
+static inline u32 iwl_trans_write_mem32(struct iwl_trans *trans, u32 addr,
+					u32 val)
+{
+	return iwl_trans_write_mem(trans, addr, &val, 1);
+}
+
 static inline void iwl_trans_set_pmi(struct iwl_trans *trans, bool state)
 {
 	trans->ops->set_pmi(trans, state);
 }
 
+#define iwl_trans_grab_nic_access(trans, silent)	\
+	__cond_lock(nic_access,				\
+		    likely((trans)->ops->grab_nic_access(trans, silent)))
+
+static inline void __releases(nic_access)
+iwl_trans_release_nic_access(struct iwl_trans *trans)
+{
+	trans->ops->release_nic_access(trans);
+	__release(nic_access);
+}
+
 /*****************************************************
 * driver (transport) register/unregister functions
 ******************************************************/
......
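Note: the __cond_lock()/__release() pair teaches sparse (the kernel's static checker) that grab and release form a conditional acquire/release context named "nic_access": the context is taken only when grab_nic_access() returns true. Under `make C=1` the checker then accepts the balanced pattern and can flag an unbalanced one (sketch, assuming a sparse-annotated build):

	if (iwl_trans_grab_nic_access(trans, false)) {
		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
		iwl_trans_release_nic_access(trans);	/* balances the grab */
	}
	/* calling release without a successful grab would warn under sparse */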
@@ -222,8 +222,6 @@ struct iwl_txq {
 * @rx_replenish: work that will be called when buffers need to be allocated
 * @drv - pointer to iwl_drv
 * @trans: pointer to the generic transport area
- * @irq - the irq number for the device
- * @irq_requested: true when the irq has been requested
 * @scd_base_addr: scheduler sram base address in SRAM
 * @scd_bc_tbls: pointer to the byte count table of the scheduler
 * @kw: keep warm address
@@ -234,6 +232,7 @@ struct iwl_txq {
 * @status - transport specific status flags
 * @cmd_queue - command queue number
 * @rx_buf_size_8k: 8 kB RX buffer size
+ * @bc_table_dword: true if the BC table expects DWORD (as opposed to bytes)
 * @rx_page_order: page order for receive buffer size
 * @wd_timeout: queue watchdog timeout (jiffies)
 */
@@ -249,11 +248,9 @@ struct iwl_trans_pcie {
 	int ict_index;
 	u32 inta;
 	bool use_ict;
-	bool irq_requested;
 	struct tasklet_struct irq_tasklet;
 	struct isr_statistics isr_stats;
 
-	unsigned int irq;
 	spinlock_t irq_lock;
 	u32 inta_mask;
 	u32 scd_base_addr;
@@ -279,6 +276,7 @@ struct iwl_trans_pcie {
 	u8 no_reclaim_cmds[MAX_NO_RECLAIM_CMDS];
 
 	bool rx_buf_size_8k;
+	bool bc_table_dword;
 	u32 rx_page_order;
 
 	const char **command_names;
......
@@ -545,6 +545,8 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
 		return;
 	}
 
+	cancel_work_sync(&trans_pcie->rx_replenish);
+
 	spin_lock_irqsave(&rxq->lock, flags);
 	iwl_pcie_rxq_free_rbs(trans);
 	spin_unlock_irqrestore(&rxq->lock, flags);
......
@@ -435,7 +435,7 @@ static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
 }
 
 static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
-				   const struct fw_img *fw)
+				   const struct fw_img *fw, bool run_in_rfkill)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	int ret;
@@ -454,7 +454,7 @@ static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
 	/* If platform's RF_KILL switch is NOT set to KILL */
 	hw_rfkill = iwl_is_rfkill_set(trans);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
-	if (hw_rfkill)
+	if (hw_rfkill && !run_in_rfkill)
 		return -ERFKILL;
 
 	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);
@@ -534,12 +534,6 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 
 	iwl_enable_rfkill_int(trans);
 
-	/* wait to make sure we flush pending tasklet*/
-	synchronize_irq(trans_pcie->irq);
-	tasklet_kill(&trans_pcie->irq_tasklet);
-
-	cancel_work_sync(&trans_pcie->rx_replenish);
-
 	/* stop and reset the on-board processor */
 	iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
@@ -564,33 +558,13 @@ static void iwl_trans_pcie_wowlan_suspend(struct iwl_trans *trans)
 
 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 {
-	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int err;
 	bool hw_rfkill;
+	int err;
 
-	trans_pcie->inta_mask = CSR_INI_SET_MASK;
-
-	if (!trans_pcie->irq_requested) {
-		tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
-			     iwl_pcie_tasklet, (unsigned long)trans);
-
-		iwl_pcie_alloc_ict(trans);
-
-		err = request_irq(trans_pcie->irq, iwl_pcie_isr_ict,
-				  IRQF_SHARED, DRV_NAME, trans);
-		if (err) {
-			IWL_ERR(trans, "Error allocating IRQ %d\n",
-				trans_pcie->irq);
-			goto error;
-		}
-		trans_pcie->irq_requested = true;
-	}
-
 	err = iwl_pcie_prepare_card_hw(trans);
 	if (err) {
 		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
-		goto err_free_irq;
+		return err;
 	}
 
 	iwl_pcie_apm_init(trans);
@@ -601,15 +575,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
 	hw_rfkill = iwl_is_rfkill_set(trans);
 	iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
 
-	return err;
-
-err_free_irq:
-	trans_pcie->irq_requested = false;
-	free_irq(trans_pcie->irq, trans);
-error:
-	iwl_pcie_free_ict(trans);
-	tasklet_kill(&trans_pcie->irq_tasklet);
-	return err;
+	return 0;
 }
 
 static void iwl_trans_pcie_stop_hw(struct iwl_trans *trans,
@@ -703,19 +669,21 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 		msecs_to_jiffies(trans_cfg->queue_watchdog_timeout);
 
 	trans_pcie->command_names = trans_cfg->command_names;
+	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
 }
 
 void iwl_trans_pcie_free(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 
+	synchronize_irq(trans_pcie->pci_dev->irq);
+	tasklet_kill(&trans_pcie->irq_tasklet);
+
 	iwl_pcie_tx_free(trans);
 	iwl_pcie_rx_free(trans);
 
-	if (trans_pcie->irq_requested == true) {
-		free_irq(trans_pcie->irq, trans);
-		iwl_pcie_free_ict(trans);
-	}
+	free_irq(trans_pcie->pci_dev->irq, trans);
+	iwl_pcie_free_ict(trans);
 
 	pci_disable_msi(trans_pcie->pci_dev);
 	iounmap(trans_pcie->hw_base);
@@ -758,6 +726,107 @@ static int iwl_trans_pcie_resume(struct iwl_trans *trans)
 }
 #endif /* CONFIG_PM_SLEEP */
 
+static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent)
+{
+	int ret;
+
+	lockdep_assert_held(&trans->reg_lock);
+
+	/* this bit wakes up the NIC */
+	__iwl_set_bit(trans, CSR_GP_CNTRL,
+		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+
+	/*
+	 * These bits say the device is running, and should keep running for
+	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
+	 * but they do not indicate that embedded SRAM is restored yet;
+	 * 3945 and 4965 have volatile SRAM, and must save/restore contents
+	 * to/from host DRAM when sleeping/waking for power-saving.
+	 * Each direction takes approximately 1/4 millisecond; with this
+	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
+	 * series of register accesses are expected (e.g. reading Event Log),
+	 * to keep device from sleeping.
+	 *
+	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
+	 * SRAM is okay/restored.  We don't check that here because this call
+	 * is just for hardware register access; but GP1 MAC_SLEEP check is a
+	 * good idea before accessing 3945/4965 SRAM (e.g. reading Event Log).
+	 *
+	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
+	 * and do not save/restore SRAM when power cycling.
+	 */
+	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
+	if (unlikely(ret < 0)) {
+		iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_FORCE_NMI);
+		if (!silent) {
+			u32 val = iwl_read32(trans, CSR_GP_CNTRL);
+			WARN_ONCE(1,
+				  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
+				  val);
+			return false;
+		}
+	}
+
+	return true;
+}
+
+static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
+{
+	lockdep_assert_held(&trans->reg_lock);
+	__iwl_clear_bit(trans, CSR_GP_CNTRL,
+			CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+	/*
+	 * Above we read the CSR_GP_CNTRL register, which will flush
+	 * any previous writes, but we need the write that clears the
+	 * MAC_ACCESS_REQ bit to be performed before any other writes
+	 * scheduled on different CPUs (after we drop reg_lock).
+	 */
+	mmiowb();
+}
+
+static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
+				   void *buf, int dwords)
+{
+	unsigned long flags;
+	int offs, ret = 0;
+	u32 *vals = buf;
+
+	spin_lock_irqsave(&trans->reg_lock, flags);
+	if (iwl_trans_grab_nic_access(trans, false)) {
+		iwl_write32(trans, HBUS_TARG_MEM_RADDR, addr);
+		for (offs = 0; offs < dwords; offs++)
+			vals[offs] = iwl_read32(trans, HBUS_TARG_MEM_RDAT);
+		iwl_trans_release_nic_access(trans);
+	} else {
+		ret = -EBUSY;
+	}
+	spin_unlock_irqrestore(&trans->reg_lock, flags);
+	return ret;
+}
+
+static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
+				    void *buf, int dwords)
+{
+	unsigned long flags;
+	int offs, ret = 0;
+	u32 *vals = buf;
+
+	spin_lock_irqsave(&trans->reg_lock, flags);
+	if (iwl_trans_grab_nic_access(trans, false)) {
+		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
+		for (offs = 0; offs < dwords; offs++)
+			iwl_write32(trans, HBUS_TARG_MEM_WDAT, vals[offs]);
+		iwl_trans_release_nic_access(trans);
+	} else {
+		ret = -EBUSY;
+	}
+	spin_unlock_irqrestore(&trans->reg_lock, flags);
+	return ret;
+}
+
 #define IWL_FLUSH_WAIT_MS	2000
 
 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans)
@@ -1235,8 +1304,12 @@ static const struct iwl_trans_ops trans_ops_pcie = {
 	.read32 = iwl_trans_pcie_read32,
 	.read_prph = iwl_trans_pcie_read_prph,
 	.write_prph = iwl_trans_pcie_write_prph,
+	.read_mem = iwl_trans_pcie_read_mem,
+	.write_mem = iwl_trans_pcie_write_mem,
 	.configure = iwl_trans_pcie_configure,
 	.set_pmi = iwl_trans_pcie_set_pmi,
+	.grab_nic_access = iwl_trans_pcie_grab_nic_access,
+	.release_nic_access = iwl_trans_pcie_release_nic_access
 };
 
 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
@@ -1318,7 +1391,6 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	}
 
 	trans->dev = &pdev->dev;
-	trans_pcie->irq = pdev->irq;
 	trans_pcie->pci_dev = pdev;
 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
@@ -1344,8 +1416,27 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
 	if (!trans->dev_cmd_pool)
 		goto out_pci_disable_msi;
 
+	trans_pcie->inta_mask = CSR_INI_SET_MASK;
+
+	tasklet_init(&trans_pcie->irq_tasklet, (void (*)(unsigned long))
+		     iwl_pcie_tasklet, (unsigned long)trans);
+
+	if (iwl_pcie_alloc_ict(trans))
+		goto out_free_cmd_pool;
+
+	err = request_irq(pdev->irq, iwl_pcie_isr_ict,
+			  IRQF_SHARED, DRV_NAME, trans);
+	if (err) {
+		IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
+		goto out_free_ict;
+	}
+
 	return trans;
 
+out_free_ict:
+	iwl_pcie_free_ict(trans);
+out_free_cmd_pool:
+	kmem_cache_destroy(trans->dev_cmd_pool);
 out_pci_disable_msi:
 	pci_disable_msi(pdev);
 out_pci_release_regions:
......
@@ -160,7 +160,7 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
 	IWL_ERR(trans, "Current SW read_ptr %d write_ptr %d\n",
 		txq->q.read_ptr, txq->q.write_ptr);
 
-	iwl_read_targ_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
+	iwl_trans_read_mem_bytes(trans, scd_sram_addr, buf, sizeof(buf));
 
 	iwl_print_hex_error(trans, buf, sizeof(buf));
 
@@ -173,9 +173,9 @@ static void iwl_pcie_txq_stuck_timer(unsigned long data)
 		u8 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
 		bool active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
 		u32 tbl_dw =
-			iwl_read_targ_mem(trans,
-					  trans_pcie->scd_base_addr +
-					  SCD_TRANS_TBL_OFFSET_QUEUE(i));
+			iwl_trans_read_mem32(trans,
+					     trans_pcie->scd_base_addr +
+					     SCD_TRANS_TBL_OFFSET_QUEUE(i));
 
 		if (i & 0x1)
 			tbl_dw = (tbl_dw & 0xFFFF0000) >> 16;
@@ -237,7 +237,10 @@ static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
 		break;
 	}
 
-	bc_ent = cpu_to_le16((len & 0xFFF) | (sta_id << 12));
+	if (trans_pcie->bc_table_dword)
+		len = DIV_ROUND_UP(len, 4);
+
+	bc_ent = cpu_to_le16(len | (sta_id << 12));
 
 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
 
@@ -656,16 +659,16 @@ void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
 	/* reset conext data memory */
 	for (; a < trans_pcie->scd_base_addr + SCD_CONTEXT_MEM_UPPER_BOUND;
 	     a += 4)
-		iwl_write_targ_mem(trans, a, 0);
+		iwl_trans_write_mem32(trans, a, 0);
 	/* reset tx status memory */
 	for (; a < trans_pcie->scd_base_addr + SCD_TX_STTS_MEM_UPPER_BOUND;
 	     a += 4)
-		iwl_write_targ_mem(trans, a, 0);
+		iwl_trans_write_mem32(trans, a, 0);
 	for (; a < trans_pcie->scd_base_addr +
 	       SCD_TRANS_TBL_OFFSET_QUEUE(
 				trans->cfg->base_params->num_of_queues);
 	     a += 4)
-		iwl_write_targ_mem(trans, a, 0);
+		iwl_trans_write_mem32(trans, a, 0);
 
 	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
 		       trans_pcie->scd_bc_tbls.dma >> 10);
@@ -1002,14 +1005,14 @@ static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
 	tbl_dw_addr = trans_pcie->scd_base_addr +
 			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
 
-	tbl_dw = iwl_read_targ_mem(trans, tbl_dw_addr);
+	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
 
 	if (txq_id & 0x1)
 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
 	else
 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
 
-	iwl_write_targ_mem(trans, tbl_dw_addr, tbl_dw);
+	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
 
 	return 0;
 }
@@ -1068,9 +1071,9 @@ void iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, int fifo,
 	iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
 
 	/* Set up Tx window size and frame limit for this queue */
-	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-			   SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
-	iwl_write_targ_mem(trans, trans_pcie->scd_base_addr +
-			   SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
+	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
+			      SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
+	iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
+			      SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
 		((frame_limit << SCD_QUEUE_CTX_REG2_WIN_SIZE_POS) &
 			SCD_QUEUE_CTX_REG2_WIN_SIZE_MSK) |
@@ -1101,8 +1104,8 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
 
 	iwl_pcie_txq_set_inactive(trans, txq_id);
 
-	_iwl_write_targ_mem_dwords(trans, stts_addr,
-				   zero_val, ARRAY_SIZE(zero_val));
+	iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
+			    ARRAY_SIZE(zero_val));
 
 	iwl_pcie_txq_unmap(trans, txq_id);
......
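Note: bc_table_dword makes the scheduler byte-count table entry carry the frame length in dwords instead of bytes, for firmware that expects DWORD units (see the new iwl_trans_config field above). A worked example of the encoding from the iwl_pcie_txq_update_byte_cnt_tbl() hunk, with illustrative values:

	/* len = 1400 bytes, sta_id = 3 */
	/* bc_table_dword == false: bc_ent = cpu_to_le16(1400 | (3 << 12)) */
	/* bc_table_dword == true:  len = DIV_ROUND_UP(1400, 4) = 350,
	 *                          bc_ent = cpu_to_le16(350 | (3 << 12)) */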