Commit 595e802e authored by Madalin Bucur's avatar Madalin Bucur Committed by David S. Miller

dpaa_eth: DPAA SGT needs to be 256B

The DPAA HW requires that at least 256 bytes from the start of the
first scatter-gather table entry are allocated and accessible. The
hardware reads the maximum size the table can have in one access,
thus requiring that the allocation and mapping to be done for the
maximum size of 256B even if there is a smaller number of entries
in the table.
Signed-off-by: Madalin Bucur <madalin.bucur@nxp.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent b95f6fbc
...@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms"); ...@@ -125,6 +125,9 @@ MODULE_PARM_DESC(tx_timeout, "The Tx timeout in ms");
/* Default alignment for start of data in an Rx FD */ /* Default alignment for start of data in an Rx FD */
#define DPAA_FD_DATA_ALIGNMENT 16 #define DPAA_FD_DATA_ALIGNMENT 16
/* The DPAA requires 256 bytes reserved and mapped for the SGT */
#define DPAA_SGT_SIZE 256
/* Values for the L3R field of the FM Parse Results /* Values for the L3R field of the FM Parse Results
*/ */
/* L3 Type field: First IP Present IPv4 */ /* L3 Type field: First IP Present IPv4 */
...@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv, ...@@ -1617,8 +1620,8 @@ static struct sk_buff *dpaa_cleanup_tx_fd(const struct dpaa_priv *priv,
if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) { if (unlikely(qm_fd_get_format(fd) == qm_fd_sg)) {
nr_frags = skb_shinfo(skb)->nr_frags; nr_frags = skb_shinfo(skb)->nr_frags;
dma_unmap_single(dev, addr, qm_fd_get_offset(fd) + dma_unmap_single(dev, addr,
sizeof(struct qm_sg_entry) * (1 + nr_frags), qm_fd_get_offset(fd) + DPAA_SGT_SIZE,
dma_dir); dma_dir);
/* The sgt buffer has been allocated with netdev_alloc_frag(), /* The sgt buffer has been allocated with netdev_alloc_frag(),
...@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, ...@@ -1903,8 +1906,7 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
void *sgt_buf; void *sgt_buf;
/* get a page frag to store the SGTable */ /* get a page frag to store the SGTable */
sz = SKB_DATA_ALIGN(priv->tx_headroom + sz = SKB_DATA_ALIGN(priv->tx_headroom + DPAA_SGT_SIZE);
sizeof(struct qm_sg_entry) * (1 + nr_frags));
sgt_buf = netdev_alloc_frag(sz); sgt_buf = netdev_alloc_frag(sz);
if (unlikely(!sgt_buf)) { if (unlikely(!sgt_buf)) {
netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n", netdev_err(net_dev, "netdev_alloc_frag() failed for size %d\n",
...@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv, ...@@ -1972,9 +1974,8 @@ static int skb_to_sg_fd(struct dpaa_priv *priv,
skbh = (struct sk_buff **)buffer_start; skbh = (struct sk_buff **)buffer_start;
*skbh = skb; *skbh = skb;
addr = dma_map_single(dev, buffer_start, priv->tx_headroom + addr = dma_map_single(dev, buffer_start,
sizeof(struct qm_sg_entry) * (1 + nr_frags), priv->tx_headroom + DPAA_SGT_SIZE, dma_dir);
dma_dir);
if (unlikely(dma_mapping_error(dev, addr))) { if (unlikely(dma_mapping_error(dev, addr))) {
dev_err(dev, "DMA mapping failed"); dev_err(dev, "DMA mapping failed");
err = -EINVAL; err = -EINVAL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment