Commit 80084e35 authored by Johannes Berg, committed by Luca Coelho

iwlwifi: pcie: map only used part of RX buffers

We don't need to map *everything* of the RX buffers, since we won't
use that much; map only the part we're going to use. This saves some
IOMMU space (where applicable, and where the IOMMU can handle partial
mappings) and also prepares a bit for mapping partial pages for 2K
buffers later.
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
Signed-off-by: Luca Coelho <luciano.coelho@intel.com>
parent c042f0c7
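
The saving is easiest to see numerically. The old code mapped
PAGE_SIZE << rx_page_order bytes, which rounds 2K and 12K buffers up to
a whole power-of-two number of pages; the patch maps only the exact RB
size that the new iwl_trans_get_rb_size() helper returns. A standalone
sketch (userspace C, assuming 4 KiB pages; get_order() here is a
stand-in for the kernel's helper, reimplemented purely for
illustration):

#include <stdio.h>

#define PAGE_SHIFT 12                   /* assume 4 KiB pages */
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

/* Stand-in for the kernel's get_order(): smallest order such that
 * (PAGE_SIZE << order) >= size. Illustration only. */
static unsigned int get_order(unsigned long size)
{
	unsigned int order = 0;

	size = (size - 1) >> PAGE_SHIFT;
	while (size) {
		order++;
		size >>= 1;
	}
	return order;
}

int main(void)
{
	/* the four RB sizes handled by iwl_trans_get_rb_size() */
	unsigned long rb_bytes[] = { 2048, 4096, 8192, 12288 };

	for (int i = 0; i < 4; i++) {
		unsigned long old_len = PAGE_SIZE << get_order(rb_bytes[i]);

		printf("RB %5lu bytes: old DMA len %5lu, new DMA len %5lu\n",
		       rb_bytes[i], old_len, rb_bytes[i]);
	}
	return 0;
}

With 4 KiB pages this prints 4096 vs 2048 for 2K buffers and 16384 vs
12288 for 12K buffers, while 4K and 8K buffers map the same length as
before; that difference is the IOMMU space the commit message refers to.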
@@ -358,6 +358,24 @@ iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size)
 	}
 }
 
+static inline int
+iwl_trans_get_rb_size(enum iwl_amsdu_size rb_size)
+{
+	switch (rb_size) {
+	case IWL_AMSDU_2K:
+		return 2 * 1024;
+	case IWL_AMSDU_4K:
+		return 4 * 1024;
+	case IWL_AMSDU_8K:
+		return 8 * 1024;
+	case IWL_AMSDU_12K:
+		return 12 * 1024;
+	default:
+		WARN_ON(1);
+		return 0;
+	}
+}
+
 struct iwl_hcmd_names {
 	u8 cmd_id;
 	const char *const cmd_name;
...
@@ -491,6 +491,7 @@ struct cont_rec {
  * @sw_csum_tx: if true, then the transport will compute the csum of the TXed
  *	frame.
  * @rx_page_order: page order for receive buffer size
+ * @rx_buf_bytes: RX buffer (RB) size in bytes
  * @reg_lock: protect hw register access
  * @mutex: to protect stop_device / start_fw / start_hw
  * @cmd_in_flight: true when we have a host command in flight
@@ -581,6 +582,7 @@ struct iwl_trans_pcie {
 	bool sw_csum_tx;
 	bool pcie_dbg_dumped_once;
 	u32 rx_page_order;
+	u32 rx_buf_bytes;
 	/*protect hw register */
 	spinlock_t reg_lock;
...
@@ -485,7 +485,7 @@ void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
 		/* Get physical address of the RB */
 		rxb->page_dma =
 			dma_map_page(trans->dev, page, 0,
-				     PAGE_SIZE << trans_pcie->rx_page_order,
+				     trans_pcie->rx_buf_bytes,
 				     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			rxb->page = NULL;
@@ -514,8 +514,7 @@ void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
 		if (!trans_pcie->rx_pool[i].page)
 			continue;
 		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
-			       PAGE_SIZE << trans_pcie->rx_page_order,
-			       DMA_FROM_DEVICE);
+			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
 		__free_pages(trans_pcie->rx_pool[i].page,
 			     trans_pcie->rx_page_order);
 		trans_pcie->rx_pool[i].page = NULL;
@@ -575,7 +574,7 @@ static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
 		/* Get physical address of the RB */
 		rxb->page_dma = dma_map_page(trans->dev, page, 0,
-					     PAGE_SIZE << trans_pcie->rx_page_order,
+					     trans_pcie->rx_buf_bytes,
 					     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			rxb->page = NULL;
@@ -1248,7 +1247,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
 	bool page_stolen = false;
-	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	int max_len = trans_pcie->rx_buf_bytes;
 	u32 offset = 0;
 
 	if (WARN_ON(!rxb))
@@ -1369,7 +1368,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
 	if (rxb->page != NULL) {
 		rxb->page_dma =
 			dma_map_page(trans->dev, rxb->page, 0,
-				     PAGE_SIZE << trans_pcie->rx_page_order,
+				     trans_pcie->rx_buf_bytes,
 				     DMA_FROM_DEVICE);
 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
 			/*
...
@@ -1915,6 +1915,8 @@ static void iwl_trans_pcie_configure(struct iwl_trans *trans,
 	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
 	trans_pcie->rx_page_order =
 		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
+	trans_pcie->rx_buf_bytes =
+		iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
 
 	trans_pcie->bc_table_dword = trans_cfg->bc_table_dword;
 	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
@@ -2933,7 +2935,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
 			      int allocated_rb_nums)
 {
 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-	int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
+	int max_len = trans_pcie->rx_buf_bytes;
 	/* Dump RBs is supported only for pre-9000 devices (1 queue) */
 	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
 	u32 i, r, j, rb_len = 0;
...