Commit 175fc430 authored by Björn Töpel, committed by Alexei Starovoitov

ice, xsk: Migrate to new MEM_TYPE_XSK_BUFF_POOL

Remove MEM_TYPE_ZERO_COPY in favor of the new MEM_TYPE_XSK_BUFF_POOL
APIs.

v4->v5: Fixed "warning: Excess function parameter 'alloc' description
        in 'ice_alloc_rx_bufs_zc'" and "warning: Excess function
        parameter 'xdp' description in
        'ice_construct_skb_zc'". (Jakub)
Signed-off-by: Maciej Fijalkowski <maciej.fijalkowski@intel.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Cc: intel-wired-lan@lists.osuosl.org
Link: https://lore.kernel.org/bpf/20200520192103.355233-10-bjorn.topel@gmail.com
parent 3b4f0b66
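In short, the Rx setup path stops registering a driver-owned zero_copy_allocator and instead registers the MEM_TYPE_XSK_BUFF_POOL memory model, letting the core XSK code hand the driver ready-made xdp_buff frames. Condensed from the ice_base.c hunk below (a before/after sketch of the registration only; error handling and surrounding context omitted):

        /* Before: driver-owned zero-copy allocator and open-coded buffer length. */
        ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr - XDP_PACKET_HEADROOM;
        ring->zca.free = ice_zca_free;
        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                         MEM_TYPE_ZERO_COPY, &ring->zca);

        /* After: the umem's xsk_buff_pool backs the ring directly. */
        ring->rx_buf_len = xsk_umem_get_rx_frame_size(ring->xsk_umem);
        err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
                                         MEM_TYPE_XSK_BUFF_POOL, NULL);
        if (!err)
                xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);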
drivers/net/ethernet/intel/ice/ice_base.c

 // SPDX-License-Identifier: GPL-2.0
 /* Copyright (c) 2019, Intel Corporation. */

+#include <net/xdp_sock_drv.h>
 #include "ice_base.h"
 #include "ice_dcb_lib.h"
@@ -308,24 +309,23 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
        if (ring->xsk_umem) {
                xdp_rxq_info_unreg_mem_model(&ring->xdp_rxq);

-               ring->rx_buf_len = ring->xsk_umem->chunk_size_nohr -
-                                  XDP_PACKET_HEADROOM;
+               ring->rx_buf_len =
+                       xsk_umem_get_rx_frame_size(ring->xsk_umem);
                /* For AF_XDP ZC, we disallow packets to span on
                 * multiple buffers, thus letting us skip that
                 * handling in the fast-path.
                 */
                chain_len = 1;
-               ring->zca.free = ice_zca_free;
                err = xdp_rxq_info_reg_mem_model(&ring->xdp_rxq,
-                                                MEM_TYPE_ZERO_COPY,
-                                                &ring->zca);
+                                                MEM_TYPE_XSK_BUFF_POOL,
+                                                NULL);
                if (err)
                        return err;
+               xsk_buff_set_rxq_info(ring->xsk_umem, &ring->xdp_rxq);

-               dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_ZERO_COPY on Rx ring %d\n",
+               dev_info(ice_pf_to_dev(vsi->back), "Registered XDP mem model MEM_TYPE_XSK_BUFF_POOL on Rx ring %d\n",
                         ring->q_index);
        } else {
-               ring->zca.free = NULL;
                if (!xdp_rxq_info_is_reg(&ring->xdp_rxq))
                        /* coverity[check_return] */
                        xdp_rxq_info_reg(&ring->xdp_rxq,
@@ -426,7 +426,7 @@ int ice_setup_rx_ctx(struct ice_ring *ring)
        writel(0, ring->tail);
        err = ring->xsk_umem ?
-             ice_alloc_rx_bufs_slow_zc(ring, ICE_DESC_UNUSED(ring)) :
+             ice_alloc_rx_bufs_zc(ring, ICE_DESC_UNUSED(ring)) :
              ice_alloc_rx_bufs(ring, ICE_DESC_UNUSED(ring));
        if (err)
                dev_info(ice_pf_to_dev(vsi->back), "Failed allocate some buffers on %sRx ring %d (pf_q %d)\n",
......
drivers/net/ethernet/intel/ice/ice_txrx.h

@@ -155,17 +155,16 @@ struct ice_tx_offload_params {
 };

 struct ice_rx_buf {
-       struct sk_buff *skb;
-       dma_addr_t dma;
        union {
                struct {
+                       struct sk_buff *skb;
+                       dma_addr_t dma;
                        struct page *page;
                        unsigned int page_offset;
                        u16 pagecnt_bias;
                };
                struct {
-                       void *addr;
-                       u64 handle;
+                       struct xdp_buff *xdp;
                };
        };
 };
@@ -289,7 +288,6 @@ struct ice_ring {
        struct rcu_head rcu;            /* to avoid race on free */
        struct bpf_prog *xdp_prog;
        struct xdp_umem *xsk_umem;
-       struct zero_copy_allocator zca;
        /* CL3 - 3rd cacheline starts here */
        struct xdp_rxq_info xdp_rxq;
        /* CLX - the below items are only accessed infrequently and should be
......
This diff is collapsed.
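The collapsed diff carries the bulk of the conversion: the driver's zero-copy data path (by the commit's scope, presumably ice_xsk.c) switches from managing umem addresses and handles to working on struct xdp_buff frames handed out by the core. As orientation only, below is a hedged sketch of what a ring-refill helper can look like on top of the new API. The ICE_RX_DESC()/ice_release_rx_desc() descriptor helpers, the rx_flex_desc field names, and the xsk_buff_alloc()/xsk_buff_xdp_get_dma() calls are assumed from the surrounding driver and net/xdp_sock_drv.h; the body is illustrative, not the patch's actual ice_alloc_rx_bufs_zc().

        /* Illustrative sketch only. Refill Rx descriptors from the umem's
         * xsk_buff_pool; each ice_rx_buf slot now carries a struct xdp_buff *
         * instead of an addr/handle pair. Returns true if the ring could not
         * be fully refilled, matching how ice_setup_rx_ctx() treats the
         * return value above.
         */
        static bool ice_refill_rx_zc_sketch(struct ice_ring *rx_ring, u16 count)
        {
                union ice_32b_rx_flex_desc *rx_desc;
                u16 ntu = rx_ring->next_to_use;
                struct ice_rx_buf *rx_buf;
                bool failed = false;
                dma_addr_t dma;

                rx_desc = ICE_RX_DESC(rx_ring, ntu);
                rx_buf = &rx_ring->rx_buf[ntu];

                while (count--) {
                        /* Grab a pre-mapped frame from the pool's fill ring. */
                        rx_buf->xdp = xsk_buff_alloc(rx_ring->xsk_umem);
                        if (!rx_buf->xdp) {
                                failed = true;  /* fill queue exhausted */
                                break;
                        }

                        /* The pool owns the DMA mapping; just fetch the address. */
                        dma = xsk_buff_xdp_get_dma(rx_buf->xdp);
                        rx_desc->read.pkt_addr = cpu_to_le64(dma);
                        rx_desc->read.hdr_addr = 0;

                        rx_desc++;
                        rx_buf++;
                        if (unlikely(++ntu == rx_ring->count)) {
                                rx_desc = ICE_RX_DESC(rx_ring, 0);
                                rx_buf = rx_ring->rx_buf;
                                ntu = 0;
                        }
                }

                if (ntu != rx_ring->next_to_use)
                        ice_release_rx_desc(rx_ring, ntu);      /* bump tail */

                return failed;
        }

Because the pool owns and recycles the DMA mappings, the old per-buffer addr/handle bookkeeping and the zero_copy_allocator free callback can go away entirely, which is exactly what the header changes below remove.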
drivers/net/ethernet/intel/ice/ice_xsk.h

@@ -10,11 +10,10 @@ struct ice_vsi;

 #ifdef CONFIG_XDP_SOCKETS
 int ice_xsk_umem_setup(struct ice_vsi *vsi, struct xdp_umem *umem, u16 qid);
-void ice_zca_free(struct zero_copy_allocator *zca, unsigned long handle);
 int ice_clean_rx_irq_zc(struct ice_ring *rx_ring, int budget);
 bool ice_clean_tx_irq_zc(struct ice_ring *xdp_ring, int budget);
 int ice_xsk_wakeup(struct net_device *netdev, u32 queue_id, u32 flags);
-bool ice_alloc_rx_bufs_slow_zc(struct ice_ring *rx_ring, u16 count);
+bool ice_alloc_rx_bufs_zc(struct ice_ring *rx_ring, u16 count);
 bool ice_xsk_any_rx_ring_ena(struct ice_vsi *vsi);
 void ice_xsk_clean_rx_ring(struct ice_ring *rx_ring);
 void ice_xsk_clean_xdp_ring(struct ice_ring *xdp_ring);
@@ -27,12 +26,6 @@ ice_xsk_umem_setup(struct ice_vsi __always_unused *vsi,
        return -EOPNOTSUPP;
 }

-static inline void
-ice_zca_free(struct zero_copy_allocator __always_unused *zca,
-            unsigned long __always_unused handle)
-{
-}
-
 static inline int
 ice_clean_rx_irq_zc(struct ice_ring __always_unused *rx_ring,
                     int __always_unused budget)
@@ -48,8 +41,8 @@ ice_clean_tx_irq_zc(struct ice_ring __always_unused *xdp_ring,
 }

 static inline bool
-ice_alloc_rx_bufs_slow_zc(struct ice_ring __always_unused *rx_ring,
-                         u16 __always_unused count)
+ice_alloc_rx_bufs_zc(struct ice_ring __always_unused *rx_ring,
+                    u16 __always_unused count)
 {
        return false;
 }
......