Commit d1626825 authored by Kazuya Mizuguchi, committed by Greg Kroah-Hartman

ravb: unmap descriptors when freeing rings


[ Upstream commit a47b70ea ]

"swiotlb buffer is full" errors occur after repeated initialisation of a
device - f.e. suspend/resume or ip link set up/down. This is because memory
mapped using dma_map_single() in ravb_ring_format() and ravb_start_xmit()
is not released.  Resolve this problem by unmapping descriptors when
freeing rings.
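
For background: on systems where the device cannot directly address a buffer,
dma_map_single() borrows a bounce buffer from the finite swiotlb pool, and
that buffer is only returned by a matching dma_unmap_single(). A minimal
sketch of the required pairing follows; it is illustrative only, and
example_setup(), example_teardown() and struct example_buf are hypothetical
helpers, not part of the ravb driver:

#include <linux/dma-mapping.h>
#include <linux/slab.h>

/* Hypothetical illustration: every successful dma_map_single() must be
 * balanced by a dma_unmap_single() on the teardown path, otherwise
 * repeated init/teardown cycles (suspend/resume, ip link set up/down)
 * exhaust the swiotlb pool.
 */
struct example_buf {
	void *cpu_addr;
	dma_addr_t dma_addr;
};

static int example_setup(struct device *dev, struct example_buf *b,
			 size_t len)
{
	b->cpu_addr = kmalloc(len, GFP_KERNEL);
	if (!b->cpu_addr)
		return -ENOMEM;

	b->dma_addr = dma_map_single(dev, b->cpu_addr, len, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, b->dma_addr)) {
		kfree(b->cpu_addr);
		b->cpu_addr = NULL;
		return -ENOMEM;
	}
	return 0;
}

static void example_teardown(struct device *dev, struct example_buf *b,
			     size_t len)
{
	/* The unmap below is the step the old ravb_ring_free() skipped. */
	dma_unmap_single(dev, b->dma_addr, len, DMA_FROM_DEVICE);
	kfree(b->cpu_addr);
	b->cpu_addr = NULL;
}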

Fixes: c156633f ("Renesas Ethernet AVB driver proper")
Signed-off-by: Kazuya Mizuguchi <kazuya.mizuguchi.ks@renesas.com>
[simon: reworked]
Signed-off-by: Simon Horman <horms+renesas@verge.net.au>
Acked-by: Sergei Shtylyov <sergei.shtylyov@cogentembedded.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Signed-off-by: Sasha Levin <alexander.levin@verizon.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 47c362f1
@@ -179,6 +179,49 @@ static struct mdiobb_ops bb_ops = {
 	.get_mdio_data = ravb_get_mdio_data,
 };
 
+/* Free TX skb function for AVB-IP */
+static int ravb_tx_free(struct net_device *ndev, int q, bool free_txed_only)
+{
+	struct ravb_private *priv = netdev_priv(ndev);
+	struct net_device_stats *stats = &priv->stats[q];
+	struct ravb_tx_desc *desc;
+	int free_num = 0;
+	int entry;
+	u32 size;
+
+	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
+		bool txed;
+
+		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
+					     NUM_TX_DESC);
+		desc = &priv->tx_ring[q][entry];
+		txed = desc->die_dt == DT_FEMPTY;
+		if (free_txed_only && !txed)
+			break;
+		/* Descriptor type must be checked before all other reads */
+		dma_rmb();
+		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
+		/* Free the original skb. */
+		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
+			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
+					 size, DMA_TO_DEVICE);
+			/* Last packet descriptor? */
+			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
+				entry /= NUM_TX_DESC;
+				dev_kfree_skb_any(priv->tx_skb[q][entry]);
+				priv->tx_skb[q][entry] = NULL;
+				if (txed)
+					stats->tx_packets++;
+			}
+			free_num++;
+		}
+		if (txed)
+			stats->tx_bytes += size;
+		desc->die_dt = DT_EEMPTY;
+	}
+	return free_num;
+}
+
 /* Free skb's and DMA buffers for Ethernet AVB */
 static void ravb_ring_free(struct net_device *ndev, int q)
 {
@@ -194,19 +237,21 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	kfree(priv->rx_skb[q]);
 	priv->rx_skb[q] = NULL;
 
-	/* Free TX skb ringbuffer */
-	if (priv->tx_skb[q]) {
-		for (i = 0; i < priv->num_tx_ring[q]; i++)
-			dev_kfree_skb(priv->tx_skb[q][i]);
-	}
-	kfree(priv->tx_skb[q]);
-	priv->tx_skb[q] = NULL;
-
 	/* Free aligned TX buffers */
 	kfree(priv->tx_align[q]);
 	priv->tx_align[q] = NULL;
 
 	if (priv->rx_ring[q]) {
+		for (i = 0; i < priv->num_rx_ring[q]; i++) {
+			struct ravb_ex_rx_desc *desc = &priv->rx_ring[q][i];
+
+			if (!dma_mapping_error(ndev->dev.parent,
+					       le32_to_cpu(desc->dptr)))
+				dma_unmap_single(ndev->dev.parent,
+						 le32_to_cpu(desc->dptr),
+						 PKT_BUF_SZ,
+						 DMA_FROM_DEVICE);
+		}
 		ring_size = sizeof(struct ravb_ex_rx_desc) *
 			    (priv->num_rx_ring[q] + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->rx_ring[q],
@@ -215,12 +260,20 @@ static void ravb_ring_free(struct net_device *ndev, int q)
 	}
 
 	if (priv->tx_ring[q]) {
+		ravb_tx_free(ndev, q, false);
+
 		ring_size = sizeof(struct ravb_tx_desc) *
 			    (priv->num_tx_ring[q] * NUM_TX_DESC + 1);
 		dma_free_coherent(ndev->dev.parent, ring_size, priv->tx_ring[q],
 				  priv->tx_desc_dma[q]);
 		priv->tx_ring[q] = NULL;
 	}
+
+	/* Free TX skb ringbuffer.
+	 * SKBs are freed by ravb_tx_free() call above.
+	 */
+	kfree(priv->tx_skb[q]);
+	priv->tx_skb[q] = NULL;
 }
 
 /* Format skb and descriptor buffer for Ethernet AVB */
@@ -431,44 +484,6 @@ static int ravb_dmac_init(struct net_device *ndev)
 	return 0;
 }
 
-/* Free TX skb function for AVB-IP */
-static int ravb_tx_free(struct net_device *ndev, int q)
-{
-	struct ravb_private *priv = netdev_priv(ndev);
-	struct net_device_stats *stats = &priv->stats[q];
-	struct ravb_tx_desc *desc;
-	int free_num = 0;
-	int entry;
-	u32 size;
-
-	for (; priv->cur_tx[q] - priv->dirty_tx[q] > 0; priv->dirty_tx[q]++) {
-		entry = priv->dirty_tx[q] % (priv->num_tx_ring[q] *
-					     NUM_TX_DESC);
-		desc = &priv->tx_ring[q][entry];
-		if (desc->die_dt != DT_FEMPTY)
-			break;
-		/* Descriptor type must be checked before all other reads */
-		dma_rmb();
-		size = le16_to_cpu(desc->ds_tagl) & TX_DS;
-		/* Free the original skb. */
-		if (priv->tx_skb[q][entry / NUM_TX_DESC]) {
-			dma_unmap_single(ndev->dev.parent, le32_to_cpu(desc->dptr),
-					 size, DMA_TO_DEVICE);
-			/* Last packet descriptor? */
-			if (entry % NUM_TX_DESC == NUM_TX_DESC - 1) {
-				entry /= NUM_TX_DESC;
-				dev_kfree_skb_any(priv->tx_skb[q][entry]);
-				priv->tx_skb[q][entry] = NULL;
-				stats->tx_packets++;
-			}
-			free_num++;
-		}
-		stats->tx_bytes += size;
-		desc->die_dt = DT_EEMPTY;
-	}
-	return free_num;
-}
-
 static void ravb_get_tx_tstamp(struct net_device *ndev)
 {
 	struct ravb_private *priv = netdev_priv(ndev);
@@ -902,7 +917,7 @@ static int ravb_poll(struct napi_struct *napi, int budget)
 	spin_lock_irqsave(&priv->lock, flags);
 	/* Clear TX interrupt */
 	ravb_write(ndev, ~mask, TIS);
-	ravb_tx_free(ndev, q);
+	ravb_tx_free(ndev, q, true);
 	netif_wake_subqueue(ndev, q);
 	mmiowb();
 	spin_unlock_irqrestore(&priv->lock, flags);
@@ -1571,7 +1586,8 @@ static netdev_tx_t ravb_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	priv->cur_tx[q] += NUM_TX_DESC;
 	if (priv->cur_tx[q] - priv->dirty_tx[q] >
-	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC && !ravb_tx_free(ndev, q))
+	    (priv->num_tx_ring[q] - 1) * NUM_TX_DESC &&
+	    !ravb_tx_free(ndev, q, true))
 		netif_stop_subqueue(ndev, q);
 
 exit: