Commit 3f7832c2 authored by Timur Tabi, committed by David S. Miller

Revert "net: qcom/emac: enforce DMA address restrictions"

This reverts commit df1ec1b9.

It turns out that memory allocated via dma_alloc_coherent is always
aligned to the size of the buffer, so there's no way the RRD and RFD
can ever be in separate 32-bit regions.
Signed-off-by: Timur Tabi <timur@codeaurora.org>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 437d2762
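
The reasoning in the commit message can be illustrated with a rough standalone C sketch. This is not part of the patch: upper_32_bits() is re-implemented for userspace and the power-of-two buffer sizes are made-up examples. It walks size-aligned start addresses across the 4 GiB mark and checks that the first and last byte of each buffer always share the same upper 32 bits, which is why the check removed in the diff below could never fire for a size-aligned allocation.

/*
 * Rough sketch, not from the patch: upper_32_bits() mirrors the kernel
 * macro, and the sizes below are hypothetical ring sizes of a few KB
 * rounded to powers of two.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define upper_32_bits(n) ((uint32_t)((uint64_t)(n) >> 32))

int main(void)
{
	uint64_t sizes[] = { 4096, 8192, 16384 };

	for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		uint64_t size = sizes[i];

		/* Size-aligned start addresses straddling the 4 GiB mark. */
		for (uint64_t base = (1ULL << 32) - 4 * size;
		     base <= (1ULL << 32) + 4 * size; base += size) {
			/* A size-aligned buffer never crosses the boundary. */
			assert(upper_32_bits(base) ==
			       upper_32_bits(base + size - 1));
		}
	}
	printf("no size-aligned buffer straddles a 4 GiB boundary\n");
	return 0;
}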
@@ -734,11 +734,6 @@ static int emac_rx_descs_alloc(struct emac_adapter *adpt)
 	rx_q->rrd.size = rx_q->rrd.count * (adpt->rrd_size * 4);
 	rx_q->rfd.size = rx_q->rfd.count * (adpt->rfd_size * 4);
 
-	/* Check if the RRD and RFD are aligned properly, and if not, adjust. */
-	if (upper_32_bits(ring_header->dma_addr) !=
-	    upper_32_bits(ring_header->dma_addr + ALIGN(rx_q->rrd.size, 8)))
-		ring_header->used = ALIGN(rx_q->rrd.size, 8);
-
 	rx_q->rrd.dma_addr = ring_header->dma_addr + ring_header->used;
 	rx_q->rrd.v_addr = ring_header->v_addr + ring_header->used;
 	ring_header->used += ALIGN(rx_q->rrd.size, 8);
@@ -772,18 +767,11 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
 
 	/* Ring DMA buffer. Each ring may need up to 8 bytes for alignment,
 	 * hence the additional padding bytes are allocated.
-	 *
-	 * Also double the memory allocated for the RRD so that we can
-	 * re-align it if necessary. The EMAC has a restriction that the
-	 * upper 32 bits of the base addresses for the RFD and RRD rings
-	 * must be the same. It is extremely unlikely that this is not the
-	 * case, since the rings are only a few KB in size. However, we
-	 * need to check for this anyway, and if the two rings are not
-	 * compliant, then we re-align.
 	 */
-	ring_header->size = ALIGN(num_tx_descs * (adpt->tpd_size * 4), 8) +
-			    ALIGN(num_rx_descs * (adpt->rfd_size * 4), 8) +
-			    ALIGN(num_rx_descs * (adpt->rrd_size * 4), 8) * 2;
+	ring_header->size = num_tx_descs * (adpt->tpd_size * 4) +
+			    num_rx_descs * (adpt->rfd_size * 4) +
+			    num_rx_descs * (adpt->rrd_size * 4) +
+			    8 + 2 * 8; /* 8 byte per one Tx and two Rx rings */
 
 	ring_header->used = 0;
 	ring_header->v_addr = dma_zalloc_coherent(dev, ring_header->size,
@@ -792,23 +780,26 @@ int emac_mac_rx_tx_rings_alloc_all(struct emac_adapter *adpt)
 	if (!ring_header->v_addr)
 		return -ENOMEM;
 
-	ret = emac_rx_descs_alloc(adpt);
-	if (ret) {
-		netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n");
-		goto err_alloc_rx;
-	}
+	ring_header->used = ALIGN(ring_header->dma_addr, 8) -
+			    ring_header->dma_addr;
 
 	ret = emac_tx_q_desc_alloc(adpt, &adpt->tx_q);
 	if (ret) {
-		netdev_err(adpt->netdev, "transmit queue allocation failed\n");
+		netdev_err(adpt->netdev, "error: Tx Queue alloc failed\n");
 		goto err_alloc_tx;
 	}
 
+	ret = emac_rx_descs_alloc(adpt);
+	if (ret) {
+		netdev_err(adpt->netdev, "error: Rx Queue alloc failed\n");
+		goto err_alloc_rx;
+	}
+
 	return 0;
 
-err_alloc_tx:
-	emac_rx_q_bufs_free(adpt);
 err_alloc_rx:
+	emac_tx_q_bufs_free(adpt);
+err_alloc_tx:
 	dma_free_coherent(dev, ring_header->size,
 			  ring_header->v_addr, ring_header->dma_addr);
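
As a side note on the sizing the revert restores: the rough standalone sketch below (again not from the patch; ALIGN() is re-implemented for userspace and the ring byte lengths are made-up, deliberately not multiples of 8 so the rounding is visible) shows why one spare 8-byte slot per ring, the "8 + 2 * 8" in ring_header->size, always covers the padding when each ring's length is rounded up via ring_header->used += ALIGN(size, 8).

/*
 * Rough sketch under stated assumptions: ALIGN() mirrors the kernel
 * macro; tpd/rfd/rrd byte lengths are hypothetical.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define ALIGN(x, a) (((x) + ((a) - 1)) & ~((uint64_t)(a) - 1))

int main(void)
{
	/* Hypothetical ring lengths in bytes (descriptor count * words * 4). */
	uint64_t tpd_bytes = 6148;	/* Tx descriptor ring */
	uint64_t rfd_bytes = 2052;	/* Rx free descriptor ring */
	uint64_t rrd_bytes = 4100;	/* Rx return descriptor ring */

	/* Restored sizing: 8 spare bytes for each of the three rings. */
	uint64_t size = tpd_bytes + rfd_bytes + rrd_bytes + 8 + 2 * 8;

	/* Lay the rings out back to back, rounding each length up to a
	 * multiple of 8 so the next ring starts 8-byte aligned.
	 */
	uint64_t used = 0;

	used += ALIGN(tpd_bytes, 8);
	used += ALIGN(rfd_bytes, 8);
	used += ALIGN(rrd_bytes, 8);

	assert(used <= size);	/* the padding budget is never exceeded */
	printf("used %llu of %llu bytes\n",
	       (unsigned long long)used, (unsigned long long)size);
	return 0;
}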