Commit dfcfeef9 authored by Sara Sharon, committed by Luca Coelho

iwlwifi: pcie: grab NIC access only once on RX init

When initializing RX we grab NIC access for every read and
write. This is redundant - we can just grab access once.
Signed-off-by: Sara Sharon <sara.sharon@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent 1554ed20
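
For context, every change below follows the same pattern: instead of letting each register helper grab and release NIC access internally, the RX init functions now take the access reference once, do all their writes with plain or *_no_grab variants, and release at the end. A minimal sketch of that bracket (condensed from the diff below, not a complete function):

	unsigned long flags;

	/* Take NIC access once for the whole register sequence;
	 * bail out if the device cannot be woken.
	 */
	if (!iwl_trans_grab_nic_access(trans, &flags))
		return;

	/* ... all RX init register writes, using iwl_write32() and the
	 * *_no_grab PRPH helpers instead of the self-grabbing
	 * iwl_write_direct32()/iwl_write_prph() variants ...
	 */

	iwl_trans_release_nic_access(trans, &flags);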
@@ -161,10 +161,11 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
 	return cpu_to_le32((u32)(dma_addr >> 8));
 }
 
-static void iwl_pcie_write_prph_64(struct iwl_trans *trans, u64 ofs, u64 val)
+static void iwl_pcie_write_prph_64_no_grab(struct iwl_trans *trans, u64 ofs,
+					   u64 val)
 {
-	iwl_write_prph(trans, ofs, val & 0xffffffff);
-	iwl_write_prph(trans, ofs + 4, val >> 32);
+	iwl_write_prph_no_grab(trans, ofs, val & 0xffffffff);
+	iwl_write_prph_no_grab(trans, ofs + 4, val >> 32);
 }
 
 /*
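
As a worked example of the 64-bit split (the value is invented for illustration): writing val = 0x1122334455667788 to offset ofs stores the low word 0x55667788 (val & 0xffffffff) at ofs and the high word 0x11223344 (val >> 32) at ofs + 4, since the PRPH write helpers take 32-bit values.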
@@ -698,6 +699,7 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 rb_size;
+	unsigned long flags;
 	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
 
 	switch (trans_pcie->rx_buf_size) {
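
A note on the /* 256 RBDs */ constant: rfdnlog is the log2 of the receive ring size, so RX_QUEUE_SIZE_LOG is presumably 8 (2^8 = 256); it gets shifted into the RBDCB size field of FH_MEM_RCSR_CHNL0_CONFIG_REG at the end of this function.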
@@ -715,23 +717,26 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
 	}
 
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return;
+
 	/* Stop Rx DMA */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
 
 	/* reset and flush pointers */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
+	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);
 
 	/* Reset driver's Rx queue write index */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
+	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
 
 	/* Tell device where to find RBD circular buffer in DRAM */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
-			   (u32)(rxq->bd_dma >> 8));
+	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
+		    (u32)(rxq->bd_dma >> 8));
 
 	/* Tell device where in DRAM to update its Rx status */
-	iwl_write_direct32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
-			   rxq->rb_stts_dma >> 4);
+	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
+		    rxq->rb_stts_dma >> 4);
 	/* Enable Rx DMA
 	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
@@ -741,13 +746,15 @@ static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
 	 * RB timeout 0x10
 	 * 256 RBDs
 	 */
-	iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
-			   FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
-			   FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
-			   FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
-			   rb_size|
-			   (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
-			   (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
+		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
+		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
+		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
+		    rb_size |
+		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
+		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
+
+	iwl_trans_release_nic_access(trans, &flags);
 
 	/* Set interrupt coalescing timer to default (2048 usecs) */
 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
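
The switch from iwl_write_direct32() to iwl_write32() is the point of the new bracket: as far as I can tell, the _direct variants are self-contained wrappers that grab NIC access around a plain register write, roughly as sketched below (an assumption about the helper's shape; the real implementation lives elsewhere in the PCIe transport code). With access already held for the whole function, the plain write suffices and the per-write grab/release round trips disappear.

	/* Hedged sketch of the self-grabbing helper this patch stops
	 * using here; the grab/release is hoisted out of every call site.
	 */
	static void iwl_write_direct32(struct iwl_trans *trans, u32 reg, u32 value)
	{
		unsigned long flags;

		if (iwl_trans_grab_nic_access(trans, &flags)) {
			iwl_write32(trans, reg, value);
			iwl_trans_release_nic_access(trans, &flags);
		}
	}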
@@ -761,6 +768,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	u32 rb_size, enabled = 0;
+	unsigned long flags;
 	int i;
 
 	switch (trans_pcie->rx_buf_size) {
@@ -778,25 +786,31 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
 	}
 
+	if (!iwl_trans_grab_nic_access(trans, &flags))
+		return;
+
 	/* Stop Rx DMA */
-	iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
+	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
 	/* disable free and used rx queue operation */
-	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, 0);
+	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
 
 	for (i = 0; i < trans->num_rx_queues; i++) {
 		/* Tell device where to find RBD free table in DRAM */
-		iwl_pcie_write_prph_64(trans, RFH_Q_FRBDCB_BA_LSB(i),
-				       (u64)(trans_pcie->rxq[i].bd_dma));
+		iwl_pcie_write_prph_64_no_grab(trans,
+					       RFH_Q_FRBDCB_BA_LSB(i),
+					       trans_pcie->rxq[i].bd_dma);
 		/* Tell device where to find RBD used table in DRAM */
-		iwl_pcie_write_prph_64(trans, RFH_Q_URBDCB_BA_LSB(i),
-				       (u64)(trans_pcie->rxq[i].used_bd_dma));
+		iwl_pcie_write_prph_64_no_grab(trans,
+					       RFH_Q_URBDCB_BA_LSB(i),
+					       trans_pcie->rxq[i].used_bd_dma);
 		/* Tell device where in DRAM to update its Rx status */
-		iwl_pcie_write_prph_64(trans, RFH_Q_URBD_STTS_WPTR_LSB(i),
-				       trans_pcie->rxq[i].rb_stts_dma);
+		iwl_pcie_write_prph_64_no_grab(trans,
+					       RFH_Q_URBD_STTS_WPTR_LSB(i),
+					       trans_pcie->rxq[i].rb_stts_dma);
 		/* Reset device index tables */
-		iwl_write_prph(trans, RFH_Q_FRBDCB_WIDX(i), 0);
-		iwl_write_prph(trans, RFH_Q_FRBDCB_RIDX(i), 0);
-		iwl_write_prph(trans, RFH_Q_URBDCB_WIDX(i), 0);
+		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
+		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
+		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
 		enabled |= BIT(i) | BIT(i + 16);
 	}
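
The enabled mask sets two bits per queue, BIT(i) and BIT(i + 16); with num_rx_queues = 2, for instance, the loop yields enabled = 0x00030003. Going by the "disable free and used rx queue operation" comment on RFH_RXF_RXQ_ACTIVE above, my reading (an assumption, not stated in the patch) is that the low half of the register activates each queue's free-RBD table handling and the high half its used-RBD table handling, so both bits are needed per queue.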
@@ -812,23 +826,26 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
 	 * Drop frames that exceed RB size
 	 * 512 RBDs
 	 */
-	iwl_write_prph(trans, RFH_RXF_DMA_CFG,
-		       RFH_DMA_EN_ENABLE_VAL |
-		       rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
-		       RFH_RXF_DMA_MIN_RB_4_8 |
-		       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
-		       RFH_RXF_DMA_RBDCB_SIZE_512);
+	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
+			       RFH_DMA_EN_ENABLE_VAL |
+			       rb_size | RFH_RXF_DMA_SINGLE_FRAME_MASK |
+			       RFH_RXF_DMA_MIN_RB_4_8 |
+			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
+			       RFH_RXF_DMA_RBDCB_SIZE_512);
 
 	/*
 	 * Activate DMA snooping.
 	 * Set RX DMA chunk size to 64B
 	 * Default queue is 0
 	 */
-	iwl_write_prph(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
-		       (DEFAULT_RXQ_NUM << RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
-		       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
+	iwl_write_prph_no_grab(trans, RFH_GEN_CFG, RFH_GEN_CFG_RFH_DMA_SNOOP |
+			       (DEFAULT_RXQ_NUM <<
+				RFH_GEN_CFG_DEFAULT_RXQ_NUM_POS) |
+			       RFH_GEN_CFG_SERVICE_DMA_SNOOP);
 	/* Enable the relevant rx queues */
-	iwl_write_prph(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
+
+	iwl_trans_release_nic_access(trans, &flags);
 
 	/* Set interrupt coalescing timer to default (2048 usecs) */
 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
...