Commit 237a3a3b authored by Amir Vadai, committed by David S. Miller

net/mlx4_en: Fix handling of dma_map failure

The result of skb_frag_dma_map() and dma_map_single() wasn't checked.
Add a check and proper handling in case of failure. Also move the
mapping to the beginning of mlx4_en_xmit(), before the ring data
structure is updated, to make error handling easier.
Signed-off-by: Amir Vadai <amirv@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent bd2f631d
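
The patch applies the standard DMA map/check/unwind pattern: every handle
returned by skb_frag_dma_map() or dma_map_single() is passed through
dma_mapping_error() before use, and on failure every mapping made so far is
undone before the skb is dropped. Below is a minimal standalone sketch of
that pattern; the helper name my_map_frags and the addrs array are made up
for illustration, this is not the mlx4 code itself:

/* Sketch only: hypothetical helper, not part of this patch. */
#include <linux/dma-mapping.h>
#include <linux/skbuff.h>

static int my_map_frags(struct device *dev, struct sk_buff *skb,
                        dma_addr_t *addrs)
{
        int i;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                addrs[i] = skb_frag_dma_map(dev, frag, 0,
                                            skb_frag_size(frag),
                                            DMA_TO_DEVICE);
                /* A failed handle must never reach the hardware. */
                if (dma_mapping_error(dev, addrs[i]))
                        goto unwind;
        }
        return 0;

unwind:
        /* Undo only the mappings that actually succeeded. */
        while (--i >= 0)
                dma_unmap_page(dev, addrs[i],
                               skb_frag_size(&skb_shinfo(skb)->frags[i]),
                               DMA_TO_DEVICE);
        return -ENOMEM;
}
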
@@ -588,6 +588,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
 {
         struct mlx4_en_priv *priv = netdev_priv(dev);
         struct mlx4_en_dev *mdev = priv->mdev;
+        struct device *ddev = priv->ddev;
         struct mlx4_en_tx_ring *ring;
         struct mlx4_en_tx_desc *tx_desc;
         struct mlx4_wqe_data_seg *data;
@@ -674,6 +675,56 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         tx_info->skb = skb;
         tx_info->nr_txbb = nr_txbb;
 
+        if (lso_header_size)
+                data = ((void *)&tx_desc->lso + ALIGN(lso_header_size + 4,
+                                                      DS_SIZE));
+        else
+                data = &tx_desc->data;
+
+        /* valid only for none inline segments */
+        tx_info->data_offset = (void *)data - (void *)tx_desc;
+
+        tx_info->linear = (lso_header_size < skb_headlen(skb) &&
+                           !is_inline(skb, NULL)) ? 1 : 0;
+
+        data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
+
+        if (is_inline(skb, &fragptr)) {
+                tx_info->inl = 1;
+        } else {
+                /* Map fragments */
+                for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
+                        frag = &skb_shinfo(skb)->frags[i];
+                        dma = skb_frag_dma_map(ddev, frag,
+                                               0, skb_frag_size(frag),
+                                               DMA_TO_DEVICE);
+                        if (dma_mapping_error(ddev, dma))
+                                goto tx_drop_unmap;
+
+                        data->addr = cpu_to_be64(dma);
+                        data->lkey = cpu_to_be32(mdev->mr.key);
+                        wmb();
+                        data->byte_count = cpu_to_be32(skb_frag_size(frag));
+                        --data;
+                }
+
+                /* Map linear part */
+                if (tx_info->linear) {
+                        u32 byte_count = skb_headlen(skb) - lso_header_size;
+
+                        dma = dma_map_single(ddev, skb->data +
+                                             lso_header_size, byte_count,
+                                             PCI_DMA_TODEVICE);
+                        if (dma_mapping_error(ddev, dma))
+                                goto tx_drop_unmap;
+
+                        data->addr = cpu_to_be64(dma);
+                        data->lkey = cpu_to_be32(mdev->mr.key);
+                        wmb();
+                        data->byte_count = cpu_to_be32(byte_count);
+                }
+                tx_info->inl = 0;
+        }
+
         /*
          * For timestamping add flag to skb_shinfo and
          * set flag for further reference
@@ -720,8 +771,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                 /* Copy headers;
                  * note that we already verified that it is linear */
                 memcpy(tx_desc->lso.header, skb->data, lso_header_size);
-                data = ((void *) &tx_desc->lso +
-                        ALIGN(lso_header_size + 4, DS_SIZE));
 
                 priv->port_stats.tso_packets++;
                 i = ((skb->len - lso_header_size) / skb_shinfo(skb)->gso_size) +
@@ -733,7 +782,6 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
                 op_own = cpu_to_be32(MLX4_OPCODE_SEND) |
                         ((ring->prod & ring->size) ?
                          cpu_to_be32(MLX4_EN_BIT_DESC_OWN) : 0);
-                data = &tx_desc->data;
 
                 tx_info->nr_bytes = max_t(unsigned int, skb->len, ETH_ZLEN);
                 ring->packets++;
@@ -742,38 +790,7 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         netdev_tx_sent_queue(ring->tx_queue, tx_info->nr_bytes);
         AVG_PERF_COUNTER(priv->pstats.tx_pktsz_avg, skb->len);
 
-        /* valid only for none inline segments */
-        tx_info->data_offset = (void *) data - (void *) tx_desc;
-
-        tx_info->linear = (lso_header_size < skb_headlen(skb) && !is_inline(skb, NULL)) ? 1 : 0;
-
-        data += skb_shinfo(skb)->nr_frags + tx_info->linear - 1;
-
-        if (!is_inline(skb, &fragptr)) {
-                /* Map fragments */
-                for (i = skb_shinfo(skb)->nr_frags - 1; i >= 0; i--) {
-                        frag = &skb_shinfo(skb)->frags[i];
-                        dma = skb_frag_dma_map(priv->ddev, frag,
-                                               0, skb_frag_size(frag),
-                                               DMA_TO_DEVICE);
-                        data->addr = cpu_to_be64(dma);
-                        data->lkey = cpu_to_be32(mdev->mr.key);
-                        wmb();
-                        data->byte_count = cpu_to_be32(skb_frag_size(frag));
-                        --data;
-                }
-
-                /* Map linear part */
-                if (tx_info->linear) {
-                        dma = dma_map_single(priv->ddev, skb->data + lso_header_size,
-                                             skb_headlen(skb) - lso_header_size, PCI_DMA_TODEVICE);
-                        data->addr = cpu_to_be64(dma);
-                        data->lkey = cpu_to_be32(mdev->mr.key);
-                        wmb();
-                        data->byte_count = cpu_to_be32(skb_headlen(skb) - lso_header_size);
-                }
-                tx_info->inl = 0;
-        } else {
+        if (tx_info->inl) {
                 build_inline_wqe(tx_desc, skb, real_size, &vlan_tag, tx_ind, fragptr);
                 tx_info->inl = 1;
         }
@@ -813,6 +830,16 @@ netdev_tx_t mlx4_en_xmit(struct sk_buff *skb, struct net_device *dev)
         return NETDEV_TX_OK;
 
+tx_drop_unmap:
+        en_err(priv, "DMA mapping error\n");
+
+        for (i++; i < skb_shinfo(skb)->nr_frags; i++) {
+                data++;
+                dma_unmap_page(ddev, (dma_addr_t) be64_to_cpu(data->addr),
+                               be32_to_cpu(data->byte_count),
+                               PCI_DMA_TODEVICE);
+        }
+
 tx_drop:
         dev_kfree_skb_any(skb);
         priv->stats.tx_dropped++;
...
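
A note on why the tx_drop_unmap loop walks forward: the mapping loop fills
the data segments from the last slot backwards (--data) while iterating the
fragments from nr_frags - 1 down to 0. So when a map fails at fragment i,
the fragments already mapped are i+1 .. nr_frags-1, stored at data+1 onwards;
if the linear map fails instead, i is -1 and the loop unmaps all nr_frags
fragments. Doing all mapping before the ring state is touched is what makes
this unwind possible. A small userspace demo of the same index arithmetic
(illustrative only, not driver code):

/* Demo of the unwind index arithmetic with a simulated failure. */
#include <stdio.h>

int main(void)
{
        int nr_frags = 4;
        int slot = nr_frags - 1;        /* like 'data': starts at the last segment */
        int i;

        for (i = nr_frags - 1; i >= 0; i--) {
                if (i == 1) {           /* pretend fragment 1 fails to map */
                        printf("map of frag %d failed\n", i);
                        break;
                }
                printf("mapped frag %d into slot %d\n", i, slot);
                --slot;                 /* mirrors '--data' in the driver */
        }

        /* Unwind: frags i+1 .. nr_frags-1 were mapped, at slot+1 onwards. */
        for (i++; i < nr_frags; i++) {
                ++slot;                 /* mirrors 'data++' */
                printf("unmap frag %d from slot %d\n", i, slot);
        }
        return 0;
}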