Commit 470bcfd6 authored by Lorenzo Bianconi's avatar Lorenzo Bianconi Committed by Jakub Kicinski

ixgbe: add xdp frags support to ndo_xdp_xmit

Add the capability to map non-linear xdp frames in XDP_TX and ndo_xdp_xmit
callback.
Signed-off-by: Lorenzo Bianconi <lorenzo@kernel.org>
Tested-by: Sandeep Penigalapati <sandeep.penigalapati@intel.com>
Signed-off-by: Tony Nguyen <anthony.l.nguyen@intel.com>
Link: https://lore.kernel.org/r/20220512212621.3746140-1-anthony.l.nguyen@intel.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent c2867816
...@@ -2344,6 +2344,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ...@@ -2344,6 +2344,7 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
hard_start = page_address(rx_buffer->page) + hard_start = page_address(rx_buffer->page) +
rx_buffer->page_offset - offset; rx_buffer->page_offset - offset;
xdp_prepare_buff(&xdp, hard_start, offset, size, true); xdp_prepare_buff(&xdp, hard_start, offset, size, true);
xdp_buff_clear_frags_flag(&xdp);
#if (PAGE_SIZE > 4096) #if (PAGE_SIZE > 4096)
/* At larger PAGE_SIZE, frame_sz depend on len size */ /* At larger PAGE_SIZE, frame_sz depend on len size */
xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size); xdp.frame_sz = ixgbe_rx_frame_truesize(rx_ring, size);
...@@ -8571,57 +8572,83 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, ...@@ -8571,57 +8572,83 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb,
int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring, int ixgbe_xmit_xdp_ring(struct ixgbe_ring *ring,
struct xdp_frame *xdpf) struct xdp_frame *xdpf)
{ {
struct ixgbe_tx_buffer *tx_buffer; struct skb_shared_info *sinfo = xdp_get_shared_info_from_frame(xdpf);
union ixgbe_adv_tx_desc *tx_desc; u8 nr_frags = unlikely(xdp_frame_has_frags(xdpf)) ? sinfo->nr_frags : 0;
u32 len, cmd_type; u16 i = 0, index = ring->next_to_use;
dma_addr_t dma; struct ixgbe_tx_buffer *tx_head = &ring->tx_buffer_info[index];
u16 i; struct ixgbe_tx_buffer *tx_buff = tx_head;
union ixgbe_adv_tx_desc *tx_desc = IXGBE_TX_DESC(ring, index);
len = xdpf->len; u32 cmd_type, len = xdpf->len;
void *data = xdpf->data;
if (unlikely(!ixgbe_desc_unused(ring))) if (unlikely(ixgbe_desc_unused(ring) < 1 + nr_frags))
return IXGBE_XDP_CONSUMED; return IXGBE_XDP_CONSUMED;
dma = dma_map_single(ring->dev, xdpf->data, len, DMA_TO_DEVICE); tx_head->bytecount = xdp_get_frame_len(xdpf);
if (dma_mapping_error(ring->dev, dma)) tx_head->gso_segs = 1;
return IXGBE_XDP_CONSUMED; tx_head->xdpf = xdpf;
/* record the location of the first descriptor for this packet */ tx_desc->read.olinfo_status =
tx_buffer = &ring->tx_buffer_info[ring->next_to_use]; cpu_to_le32(tx_head->bytecount << IXGBE_ADVTXD_PAYLEN_SHIFT);
tx_buffer->bytecount = len;
tx_buffer->gso_segs = 1;
tx_buffer->protocol = 0;
i = ring->next_to_use; for (;;) {
tx_desc = IXGBE_TX_DESC(ring, i); dma_addr_t dma;
dma_unmap_len_set(tx_buffer, len, len); dma = dma_map_single(ring->dev, data, len, DMA_TO_DEVICE);
dma_unmap_addr_set(tx_buffer, dma, dma); if (dma_mapping_error(ring->dev, dma))
tx_buffer->xdpf = xdpf; goto unmap;
tx_desc->read.buffer_addr = cpu_to_le64(dma); dma_unmap_len_set(tx_buff, len, len);
dma_unmap_addr_set(tx_buff, dma, dma);
cmd_type = IXGBE_ADVTXD_DTYP_DATA | IXGBE_ADVTXD_DCMD_DEXT |
IXGBE_ADVTXD_DCMD_IFCS | len;
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
tx_desc->read.buffer_addr = cpu_to_le64(dma);
tx_buff->protocol = 0;
if (++index == ring->count)
index = 0;
if (i == nr_frags)
break;
tx_buff = &ring->tx_buffer_info[index];
tx_desc = IXGBE_TX_DESC(ring, index);
tx_desc->read.olinfo_status = 0;
data = skb_frag_address(&sinfo->frags[i]);
len = skb_frag_size(&sinfo->frags[i]);
i++;
}
/* put descriptor type bits */ /* put descriptor type bits */
cmd_type = IXGBE_ADVTXD_DTYP_DATA | tx_desc->read.cmd_type_len |= cpu_to_le32(IXGBE_TXD_CMD);
IXGBE_ADVTXD_DCMD_DEXT |
IXGBE_ADVTXD_DCMD_IFCS;
cmd_type |= len | IXGBE_TXD_CMD;
tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
tx_desc->read.olinfo_status =
cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
/* Avoid any potential race with xdp_xmit and cleanup */ /* Avoid any potential race with xdp_xmit and cleanup */
smp_wmb(); smp_wmb();
/* set next_to_watch value indicating a packet is present */ tx_head->next_to_watch = tx_desc;
i++; ring->next_to_use = index;
if (i == ring->count)
i = 0;
tx_buffer->next_to_watch = tx_desc;
ring->next_to_use = i;
return IXGBE_XDP_TX; return IXGBE_XDP_TX;
unmap:
for (;;) {
tx_buff = &ring->tx_buffer_info[index];
if (dma_unmap_len(tx_buff, len))
dma_unmap_page(ring->dev, dma_unmap_addr(tx_buff, dma),
dma_unmap_len(tx_buff, len),
DMA_TO_DEVICE);
dma_unmap_len_set(tx_buff, len, 0);
if (tx_buff == tx_head)
break;
if (!index)
index += ring->count;
index--;
}
return IXGBE_XDP_CONSUMED;
} }
netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment