Commit 4bce4e5c authored by Maxim Mikityanskiy, committed by Daniel Borkmann

xsk: Return the whole xdp_desc from xsk_umem_consume_tx

Some drivers want to access the data being transmitted in order to implement
acceleration features of the NICs. It is also useful in the AF_XDP TX flow.

Change the xsk_umem_consume_tx API to return the whole xdp_desc, which
carries the data pointer and length, instead of returning only the DMA
address and length. The driver now derives the DMA address itself via
xdp_umem_get_dma(). Adapt the i40e and ixgbe implementations to this change.
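
For illustration, the new driver-side consumption pattern looks roughly
like the sketch below. This is not code from the patch: my_xmit_zc,
my_ring and its fields are hypothetical stand-ins for a driver's real TX
state, and hardware descriptor programming is elided.

	/* Minimal sketch of a zero-copy TX loop against the new API.
	 * Error handling and HW descriptor setup are omitted.
	 */
	static void my_xmit_zc(struct my_ring *ring, unsigned int budget)
	{
		struct xdp_desc desc;
		dma_addr_t dma;

		while (budget-- > 0) {
			/* The whole descriptor (addr + len) is returned ... */
			if (!xsk_umem_consume_tx(ring->xsk_umem, &desc))
				break;

			/* ... so the driver derives the DMA address itself,
			 * and can also reach the payload, e.g. via
			 * xdp_umem_get_data().
			 */
			dma = xdp_umem_get_dma(ring->xsk_umem, desc.addr);
			dma_sync_single_for_device(ring->dev, dma, desc.len,
						   DMA_BIDIRECTIONAL);

			/* ... post dma / desc.len to the hardware TX ring ... */
		}

		xsk_umem_consume_tx_done(ring->xsk_umem);
	}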
Signed-off-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Acked-by: Saeed Mahameed <saeedm@mellanox.com>
Cc: Björn Töpel <bjorn.topel@intel.com>
Cc: Magnus Karlsson <magnus.karlsson@intel.com>
Acked-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
parent 123e8da1
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -641,8 +641,8 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 	struct i40e_tx_desc *tx_desc = NULL;
 	struct i40e_tx_buffer *tx_bi;
 	bool work_done = true;
+	struct xdp_desc desc;
 	dma_addr_t dma;
-	u32 len;
 
 	while (budget-- > 0) {
 		if (!unlikely(I40E_DESC_UNUSED(xdp_ring))) {
@@ -651,21 +651,23 @@ static bool i40e_xmit_zc(struct i40e_ring *xdp_ring, unsigned int budget)
 			break;
 		}
 
-		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
 			break;
 
-		dma_sync_single_for_device(xdp_ring->dev, dma, len,
+		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
 					   DMA_BIDIRECTIONAL);
 
 		tx_bi = &xdp_ring->tx_bi[xdp_ring->next_to_use];
-		tx_bi->bytecount = len;
+		tx_bi->bytecount = desc.len;
 
 		tx_desc = I40E_TX_DESC(xdp_ring, xdp_ring->next_to_use);
 		tx_desc->buffer_addr = cpu_to_le64(dma);
 		tx_desc->cmd_type_offset_bsz =
 			build_ctob(I40E_TX_DESC_CMD_ICRC
 				   | I40E_TX_DESC_CMD_EOP,
-				   0, len, 0);
+				   0, desc.len, 0);
 
 		xdp_ring->next_to_use++;
 		if (xdp_ring->next_to_use == xdp_ring->count)
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_xsk.c
@@ -571,8 +571,9 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 	union ixgbe_adv_tx_desc *tx_desc = NULL;
 	struct ixgbe_tx_buffer *tx_bi;
 	bool work_done = true;
-	u32 len, cmd_type;
+	struct xdp_desc desc;
 	dma_addr_t dma;
+	u32 cmd_type;
 
 	while (budget-- > 0) {
 		if (unlikely(!ixgbe_desc_unused(xdp_ring)) ||
@@ -581,14 +582,16 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 			break;
 		}
 
-		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &dma, &len))
+		if (!xsk_umem_consume_tx(xdp_ring->xsk_umem, &desc))
 			break;
 
-		dma_sync_single_for_device(xdp_ring->dev, dma, len,
+		dma = xdp_umem_get_dma(xdp_ring->xsk_umem, desc.addr);
+
+		dma_sync_single_for_device(xdp_ring->dev, dma, desc.len,
 					   DMA_BIDIRECTIONAL);
 
 		tx_bi = &xdp_ring->tx_buffer_info[xdp_ring->next_to_use];
-		tx_bi->bytecount = len;
+		tx_bi->bytecount = desc.len;
 		tx_bi->xdpf = NULL;
 		tx_bi->gso_segs = 1;
 
@@ -599,10 +602,10 @@ static bool ixgbe_xmit_zc(struct ixgbe_ring *xdp_ring, unsigned int budget)
 		cmd_type = IXGBE_ADVTXD_DTYP_DATA |
 			   IXGBE_ADVTXD_DCMD_DEXT |
 			   IXGBE_ADVTXD_DCMD_IFCS;
-		cmd_type |= len | IXGBE_TXD_CMD;
+		cmd_type |= desc.len | IXGBE_TXD_CMD;
 		tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type);
 		tx_desc->read.olinfo_status =
-			cpu_to_le32(len << IXGBE_ADVTXD_PAYLEN_SHIFT);
+			cpu_to_le32(desc.len << IXGBE_ADVTXD_PAYLEN_SHIFT);
 
 		xdp_ring->next_to_use++;
 		if (xdp_ring->next_to_use == xdp_ring->count)
--- a/include/net/xdp_sock.h
+++ b/include/net/xdp_sock.h
@@ -81,7 +81,7 @@ bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
 u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
 void xsk_umem_discard_addr(struct xdp_umem *umem);
 void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len);
+bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
 void xsk_umem_consume_tx_done(struct xdp_umem *umem);
 struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
 struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
@@ -175,8 +175,8 @@ static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
 {
 }
 
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma,
-				       u32 *len)
+static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
+				       struct xdp_desc *desc)
 {
 	return false;
 }
--- a/net/xdp/xsk.c
+++ b/net/xdp/xsk.c
@@ -172,22 +172,18 @@ void xsk_umem_consume_tx_done(struct xdp_umem *umem)
 }
 EXPORT_SYMBOL(xsk_umem_consume_tx_done);
 
-bool xsk_umem_consume_tx(struct xdp_umem *umem, dma_addr_t *dma, u32 *len)
+bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc)
 {
-	struct xdp_desc desc;
 	struct xdp_sock *xs;
 
 	rcu_read_lock();
 	list_for_each_entry_rcu(xs, &umem->xsk_list, list) {
-		if (!xskq_peek_desc(xs->tx, &desc))
+		if (!xskq_peek_desc(xs->tx, desc))
 			continue;
 
-		if (xskq_produce_addr_lazy(umem->cq, desc.addr))
+		if (xskq_produce_addr_lazy(umem->cq, desc->addr))
 			goto out;
 
-		*dma = xdp_umem_get_dma(umem, desc.addr);
-		*len = desc.len;
-
 		xskq_discard_desc(xs->tx);
 		rcu_read_unlock();
 		return true;