Commit 154bb2fa authored by Jakub Kicinski

Merge branch 'ena-driver-xdp-bug-fixes'

David Arinzon says:

====================
ENA driver XDP bug fixes

This patchset contains multiple XDP-related bug fixes
in the ENA driver.
====================

Link: https://lore.kernel.org/r/20231211062801.27891-1-darinzon@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f99cd562 4ab138ca
@@ -328,9 +328,6 @@ static int ena_com_create_and_store_tx_meta_desc(struct ena_com_io_sq *io_sq,
 	 * compare it to the stored version, just create the meta
 	 */
 	if (io_sq->disable_meta_caching) {
-		if (unlikely(!ena_tx_ctx->meta_valid))
-			return -EINVAL;
-
 		*have_meta = true;
 		return ena_com_create_meta(io_sq, ena_meta);
 	}
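Context for this first hunk (a hedged reading, not quoted from the patch description): TX requests that originate from XDP are built without TX metadata, so ena_tx_ctx->meta_valid stays 0, and on devices with disable_meta_caching set the removed check failed every XDP_TX/XDP_REDIRECT transmission with -EINVAL. A minimal sketch, with the zero-initialized context standing in for the driver's real XDP TX setup:

	/* Sketch: an XDP frame's TX context carries no metadata. */
	struct ena_com_tx_ctx ena_tx_ctx = {};	/* meta_valid == 0 */

	/* Before the fix, with io_sq->disable_meta_caching set, the
	 * meta_valid check returned -EINVAL and the frame was never sent.
	 * After the fix, ena_com_create_meta() builds the meta descriptor
	 * from the (zeroed) ena_tx_ctx.ena_meta fields instead.
	 */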
@@ -74,6 +74,8 @@ static void ena_unmap_tx_buff(struct ena_ring *tx_ring,
 			      struct ena_tx_buffer *tx_info);
 static int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
 					    int first_index, int count);
+static void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
+						   int first_index, int count);
 
 /* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
 static void ena_increase_stat(u64 *statp, u64 cnt,
@@ -457,23 +459,22 @@ static void ena_init_all_xdp_queues(struct ena_adapter *adapter)
 static int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter)
 {
+	u32 xdp_first_ring = adapter->xdp_first_ring;
+	u32 xdp_num_queues = adapter->xdp_num_queues;
 	int rc = 0;
 
-	rc = ena_setup_tx_resources_in_range(adapter, adapter->xdp_first_ring,
-					     adapter->xdp_num_queues);
+	rc = ena_setup_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
 	if (rc)
 		goto setup_err;
 
-	rc = ena_create_io_tx_queues_in_range(adapter,
-					      adapter->xdp_first_ring,
-					      adapter->xdp_num_queues);
+	rc = ena_create_io_tx_queues_in_range(adapter, xdp_first_ring, xdp_num_queues);
 	if (rc)
 		goto create_err;
 
 	return 0;
 
 create_err:
-	ena_free_all_io_tx_resources(adapter);
+	ena_free_all_io_tx_resources_in_range(adapter, xdp_first_ring, xdp_num_queues);
 
 setup_err:
 	return rc;
 }
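The bug fixed here: on ena_create_io_tx_queues_in_range() failure, the old error path called ena_free_all_io_tx_resources(), which frees resources for every TX queue, including the regular netdev queues this function never set up. The fix unwinds exactly the XDP ring range that was allocated. A hedged standalone sketch of the idiom (the _in_range helpers are the patch's, the wrapper name is hypothetical):

	/* Hypothetical wrapper: an error path must free only what this
	 * function itself allocated, i.e. rings [first, first + count).
	 */
	static int xdp_rings_bringup(struct ena_adapter *adapter, u32 first, u32 count)
	{
		int rc;

		rc = ena_setup_tx_resources_in_range(adapter, first, count);
		if (rc)
			return rc;	/* nothing allocated yet, nothing to undo */

		rc = ena_create_io_tx_queues_in_range(adapter, first, count);
		if (rc)
			ena_free_all_io_tx_resources_in_range(adapter, first, count);

		return rc;
	}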
@@ -1492,11 +1493,6 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 		if (unlikely(!skb))
 			return NULL;
 
-		/* sync this buffer for CPU use */
-		dma_sync_single_for_cpu(rx_ring->dev,
-					dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
-					len,
-					DMA_FROM_DEVICE);
 		skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);
 		dma_sync_single_for_device(rx_ring->dev,
 					   dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
@@ -1515,17 +1511,10 @@ static struct sk_buff *ena_rx_skb(struct ena_ring *rx_ring,
 		buf_len = SKB_DATA_ALIGN(len + buf_offset + tailroom);
 
-		pre_reuse_paddr = dma_unmap_addr(&rx_info->ena_buf, paddr);
-
 		/* If XDP isn't loaded try to reuse part of the RX buffer */
 		reuse_rx_buf_page = !is_xdp_loaded &&
 				    ena_try_rx_buf_page_reuse(rx_info, buf_len, len, pkt_offset);
 
-		dma_sync_single_for_cpu(rx_ring->dev,
-					pre_reuse_paddr + pkt_offset,
-					len,
-					DMA_FROM_DEVICE);
-
 		if (!reuse_rx_buf_page)
 			ena_unmap_rx_buff_attrs(rx_ring, rx_info, DMA_ATTR_SKIP_CPU_SYNC);
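Both dma_sync_single_for_cpu() calls removed from ena_rx_skb() are replaced by a single sync performed earlier, in ena_clean_rx_irq() (added two hunks below), so the buffer is synced exactly once, before the first CPU access. A hedged sketch of the invariant on the copy path, with dev, paddr, pkt_offset and len as placeholders for the driver's real fields:

	/* Sync for the CPU before reading, and back for the device only
	 * after the CPU is done touching the buffer.
	 */
	dma_sync_single_for_cpu(dev, paddr + pkt_offset, len, DMA_FROM_DEVICE);
	skb_copy_to_linear_data(skb, buf_addr + buf_offset, len);	/* CPU read */
	dma_sync_single_for_device(dev, paddr + pkt_offset, len, DMA_FROM_DEVICE);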
@@ -1671,20 +1660,23 @@ static void ena_set_rx_hash(struct ena_ring *rx_ring,
 	}
 }
 
-static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp)
+static int ena_xdp_handle_buff(struct ena_ring *rx_ring, struct xdp_buff *xdp, u16 num_descs)
 {
 	struct ena_rx_buffer *rx_info;
 	int ret;
 
+	/* XDP multi-buffer packets not supported */
+	if (unlikely(num_descs > 1)) {
+		netdev_err_once(rx_ring->adapter->netdev,
+				"xdp: dropped unsupported multi-buffer packets\n");
+		ena_increase_stat(&rx_ring->rx_stats.xdp_drop, 1, &rx_ring->syncp);
+		return ENA_XDP_DROP;
+	}
+
 	rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
 	xdp_prepare_buff(xdp, page_address(rx_info->page),
 			 rx_info->buf_offset,
 			 rx_ring->ena_bufs[0].len, false);
-	/* If for some reason we received a bigger packet than
-	 * we expect, then we simply drop it
-	 */
-	if (unlikely(rx_ring->ena_bufs[0].len > ENA_XDP_MAX_MTU))
-		return ENA_XDP_DROP;
 
 	ret = ena_xdp_execute(rx_ring, xdp);
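Two things happen in this hunk. Frames spanning more than one RX descriptor are now dropped before an xdp_buff is built, since this XDP path has no multi-buffer (fragment) support; netdev_err_once() keeps the log to a single line and the drop is still counted in rx_stats.xdp_drop. The old ENA_XDP_MAX_MTU length check thereby becomes redundant and is removed: a frame that fits in a single descriptor cannot exceed the XDP buffer. A hedged sketch of the resulting single-buffer assumption (names from the patch, surrounding code elided):

	/* Only single-descriptor frames reach the BPF program; the
	 * xdp_buff is built from ena_bufs[0] alone, with no frags.
	 */
	if (unlikely(num_descs > 1))
		return ENA_XDP_DROP;	/* counted in rx_stats.xdp_drop */

	xdp_prepare_buff(xdp, page_address(rx_info->page),
			 rx_info->buf_offset, rx_ring->ena_bufs[0].len, false);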
@@ -1719,6 +1711,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 	int xdp_flags = 0;
 	int total_len = 0;
 	int xdp_verdict;
+	u8 pkt_offset;
 	int rc = 0;
 	int i;
@@ -1745,15 +1738,21 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 		/* First descriptor might have an offset set by the device */
 		rx_info = &rx_ring->rx_buffer_info[rx_ring->ena_bufs[0].req_id];
-		rx_info->buf_offset += ena_rx_ctx.pkt_offset;
+		pkt_offset = ena_rx_ctx.pkt_offset;
+		rx_info->buf_offset += pkt_offset;
 
 		netif_dbg(rx_ring->adapter, rx_status, rx_ring->netdev,
 			  "rx_poll: q %d got packet from ena. descs #: %d l3 proto %d l4 proto %d hash: %x\n",
 			  rx_ring->qid, ena_rx_ctx.descs, ena_rx_ctx.l3_proto,
 			  ena_rx_ctx.l4_proto, ena_rx_ctx.hash);
 
+		dma_sync_single_for_cpu(rx_ring->dev,
+					dma_unmap_addr(&rx_info->ena_buf, paddr) + pkt_offset,
+					rx_ring->ena_bufs[0].len,
+					DMA_FROM_DEVICE);
+
 		if (ena_xdp_present_ring(rx_ring))
-			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp);
+			xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);
 
 		/* allocate skb and fill it */
 		if (xdp_verdict == ENA_XDP_PASS)
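This is the consolidation the removals above point at: the one for-CPU sync now runs before ena_xdp_handle_buff() invokes the BPF program. That ordering is the SWIOTLB fix proper: when bounce buffers are in use, dma_sync_single_for_cpu() is what copies the device's bounce buffer into the page, so with the old placement inside ena_rx_skb() the XDP program could inspect stale data. A hedged sketch of the required order (placeholders as before):

	/* 1. Make the device's writes visible to the CPU. */
	dma_sync_single_for_cpu(dev, paddr + pkt_offset, len, DMA_FROM_DEVICE);

	/* 2. Only now may the BPF program, or the skb path, read the data. */
	if (ena_xdp_present_ring(rx_ring))
		xdp_verdict = ena_xdp_handle_buff(rx_ring, &xdp, ena_rx_ctx.descs);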
@@ -1777,7 +1776,7 @@ static int ena_clean_rx_irq(struct ena_ring *rx_ring, struct napi_struct *napi,
 			if (xdp_verdict & ENA_XDP_FORWARDED) {
 				ena_unmap_rx_buff_attrs(rx_ring,
 							&rx_ring->rx_buffer_info[req_id],
-							0);
+							DMA_ATTR_SKIP_CPU_SYNC);
 				rx_ring->rx_buffer_info[req_id].page = NULL;
 			}
 		}
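The attrs change pairs with the sync move: a buffer handed to XDP_TX/XDP_REDIRECT was already synced for the CPU before the program ran, so the unmap must skip the implicit for-CPU sync. Under SWIOTLB a plain unmap would copy the stale bounce buffer back over the page, clobbering any rewrites the program made. A hedged sketch of the release step (size and direction are this writer's assumption, matching how the driver maps its RX pages elsewhere):

	/* Release a forwarded RX buffer without re-syncing it. */
	dma_unmap_page_attrs(rx_ring->dev,
			     dma_unmap_addr(&rx_info->ena_buf, paddr),
			     ENA_PAGE_SIZE, DMA_BIDIRECTIONAL,
			     DMA_ATTR_SKIP_CPU_SYNC);
	rx_info->page = NULL;	/* ownership passed to the forwarding path */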