Commit f2da4ceb authored by Johannes Berg, committed by Greg Kroah-Hartman

iwlwifi: handle DMA mapping failures

commit 7c341582 upstream.

The RX replenish code doesn't handle DMA mapping failures,
which will cause issues if there actually is a failure. This
was reported by Shuah Khan who found a DMA mapping framework
warning ("device driver failed to check map error").
Reported-by: Shuah Khan <shuah.khan@hp.com>
Reviewed-by: Emmanuel Grumbach <emmanuel.grumbach@intel.com>
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
[bwh: Backported to 3.2:
 - Adjust filename, context, indentation
 - Use bus(trans) instead of trans where necessary
 - Use hw_params(trans).rx_page_order instead of trans_pcie->rx_page_order]
Signed-off-by: Ben Hutchings <ben@decadent.org.uk>
[wujg: Backported to 3.4:
 - Adjust context
 - Use trans instead of bus(trans)
 - Use hw_params(trans).rx_page_order instead of trans_pcie->rx_page_order]
Signed-off-by: Jianguo Wu <wujianguo@huawei.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent af720fc6
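
The fix follows the standard streaming-DMA rule: every dma_map_page() call must be checked with dma_mapping_error() before the returned address is used, and on failure the page has to be freed or requeued rather than handed to the hardware. A minimal, hedged sketch of that pattern (the helper name and the dev/page/order parameters are illustrative, not taken from the driver):

#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/mm.h>

/*
 * Illustrative sketch only: map one RX page for device-to-CPU DMA and
 * report failure to the caller, who still owns the page and may free it
 * or put the buffer back on a "used" list, as the patch below does.
 */
static int example_map_rx_page(struct device *dev, struct page *page,
			       unsigned int order, dma_addr_t *dma)
{
	*dma = dma_map_page(dev, page, 0,
			    PAGE_SIZE << order, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma))
		return -ENOMEM;
	return 0;
}
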
@@ -315,6 +315,14 @@ static void iwlagn_rx_allocate(struct iwl_trans *trans, gfp_t priority)
 		rxb->page_dma = dma_map_page(trans->dev, page, 0,
 				PAGE_SIZE << hw_params(trans).rx_page_order,
 				DMA_FROM_DEVICE);
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			rxb->page = NULL;
+			spin_lock_irqsave(&rxq->lock, flags);
+			list_add(&rxb->list, &rxq->rx_used);
+			spin_unlock_irqrestore(&rxq->lock, flags);
+			__free_pages(page, hw_params(trans).rx_page_order);
+			return;
+		}
 		/* dma address must be no more than 36 bits */
 		BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
 		/* and also 256 byte aligned! */
@@ -450,8 +458,19 @@ static void iwl_rx_handle_rxbuf(struct iwl_trans *trans,
 			dma_map_page(trans->dev, rxb->page, 0,
 				PAGE_SIZE << hw_params(trans).rx_page_order,
 				DMA_FROM_DEVICE);
-		list_add_tail(&rxb->list, &rxq->rx_free);
-		rxq->free_count++;
+		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
+			/*
+			 * free the page(s) as well to not break
+			 * the invariant that the items on the used
+			 * list have no page(s)
+			 */
+			__free_pages(rxb->page, hw_params(trans).rx_page_order);
+			rxb->page = NULL;
+			list_add_tail(&rxb->list, &rxq->rx_used);
+		} else {
+			list_add_tail(&rxb->list, &rxq->rx_free);
+			rxq->free_count++;
+		}
 	} else
 		list_add_tail(&rxb->list, &rxq->rx_used);
 	spin_unlock_irqrestore(&rxq->lock, flags);
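
For context on the BUG_ON() assertions kept right after the new error check in the first hunk: a successfully mapped RX address must still fit in 36 bits and be 256-byte aligned before it can be programmed into the hardware. An illustrative sketch of that check (the helper is hypothetical, not part of the driver):

#include <linux/types.h>
#include <linux/dma-mapping.h>

/*
 * Illustrative only: mirrors the constraints the driver asserts with
 * BUG_ON() -- the RX DMA address must fit in 36 bits and be 256-byte
 * aligned.
 */
static bool example_rx_dma_addr_ok(dma_addr_t addr)
{
	return !(addr & ~DMA_BIT_MASK(36)) && !(addr & 0xFF);
}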