Commit 63a64a56 authored by Tirthendu Sarkar, committed by Alexei Starovoitov

xsk: prepare 'options' in xdp_desc for multi-buffer use

Use the 'options' field in xdp_desc as a packet continuity marker. Since
the 'options' field was unused until now and applications were expected
to set it to 0, the 'eop' (end-of-packet) descriptor keeps it at 0, while
non-eop descriptors set it to 1. This ensures that legacy applications
continue to work without any change for single-buffer packets.

Add helper functions and extend xskq_prod_reserve_desc() to use the
'options' field.
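
For illustration, a minimal sketch (not part of this patch) of how a
multi-buffer-aware userspace consumer could use the new bit. It assumes
the xsk_ring_cons__* accessors from libxdp's xsk.h (pre-1.0 libbpf
shipped the same API as <bpf/xsk.h>); handle_frag() and
handle_packet_end() are hypothetical application callbacks:

#include <linux/if_xdp.h>
#include <xdp/xsk.h>

void handle_frag(__u64 addr, __u32 len);	/* app-defined, hypothetical */
void handle_packet_end(void);			/* app-defined, hypothetical */

static void drain_rx(struct xsk_ring_cons *rx)
{
	__u32 idx, i, rcvd;

	rcvd = xsk_ring_cons__peek(rx, 64, &idx);
	for (i = 0; i < rcvd; i++) {
		const struct xdp_desc *desc = xsk_ring_cons__rx_desc(rx, idx + i);

		handle_frag(desc->addr, desc->len);
		/* XDP_PKT_CONTD clear == this descriptor ends the packet */
		if (!(desc->options & XDP_PKT_CONTD))
			handle_packet_end();
	}
	xsk_ring_cons__release(rx, rcvd);
}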
Signed-off-by: Tirthendu Sarkar <tirthendu.sarkar@intel.com>
Link: https://lore.kernel.org/r/20230719132421.584801-2-maciej.fijalkowski@intel.com
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
parent 492e797f
include/uapi/linux/if_xdp.h
@@ -108,4 +108,11 @@ struct xdp_desc {
 /* UMEM descriptor is __u64 */
+/* Flag indicating that the packet continues with the buffer pointed out by the
+ * next frame in the ring. The end of the packet is signalled by setting this
+ * bit to zero. For single buffer packets, every descriptor has 'options' set
+ * to 0 and this maintains backward compatibility.
+ */
+#define XDP_PKT_CONTD (1 << 0)
 #endif /* _LINUX_IF_XDP_H */
net/xdp/xsk.c
@@ -135,14 +135,14 @@ int xsk_reg_pool_at_qid(struct net_device *dev, struct xsk_buff_pool *pool,
 	return 0;
 }
-static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len)
+static int __xsk_rcv_zc(struct xdp_sock *xs, struct xdp_buff *xdp, u32 len, u32 flags)
 {
 	struct xdp_buff_xsk *xskb = container_of(xdp, struct xdp_buff_xsk, xdp);
 	u64 addr;
 	int err;
 	addr = xp_get_handle(xskb);
-	err = xskq_prod_reserve_desc(xs->rx, addr, len);
+	err = xskq_prod_reserve_desc(xs->rx, addr, len, flags);
 	if (err) {
 		xs->rx_queue_full++;
 		return err;
@@ -189,7 +189,7 @@ static int __xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	}
 	xsk_copy_xdp(xsk_xdp, xdp, len);
-	err = __xsk_rcv_zc(xs, xsk_xdp, len);
+	err = __xsk_rcv_zc(xs, xsk_xdp, len, 0);
 	if (err) {
 		xsk_buff_free(xsk_xdp);
 		return err;
@@ -259,7 +259,7 @@ static int xsk_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
 		len = xdp->data_end - xdp->data;
-		return __xsk_rcv_zc(xs, xdp, len);
+		return __xsk_rcv_zc(xs, xdp, len, 0);
 	}
 	err = __xsk_rcv(xs, xdp);
net/xdp/xsk_queue.h
@@ -130,6 +130,11 @@ static inline bool xskq_cons_read_addr_unchecked(struct xsk_queue *q, u64 *addr)
 	return false;
 }
+static inline bool xp_unused_options_set(u32 options)
+{
+	return options & ~XDP_PKT_CONTD;
+}
 static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
 					    struct xdp_desc *desc)
 {
@@ -141,7 +146,7 @@ static inline bool xp_aligned_validate_desc(struct xsk_buff_pool *pool,
 	if (desc->addr >= pool->addrs_cnt)
 		return false;
-	if (desc->options)
+	if (xp_unused_options_set(desc->options))
 		return false;
 	return true;
 }
@@ -158,7 +163,7 @@ static inline bool xp_unaligned_validate_desc(struct xsk_buff_pool *pool,
 	    xp_desc_crosses_non_contig_pg(pool, addr, desc->len))
 		return false;
-	if (desc->options)
+	if (xp_unused_options_set(desc->options))
 		return false;
 	return true;
 }
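
Because the check masks out only the known bit, any descriptor that sets
a bit other than XDP_PKT_CONTD is rejected, which keeps the validation
future-proof as more option bits get defined. A standalone illustration
(plain userspace C re-declaring the helper; not kernel code):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define XDP_PKT_CONTD (1 << 0)

static bool xp_unused_options_set(uint32_t options)
{
	return options & ~XDP_PKT_CONTD;	/* true iff an unknown bit is set */
}

int main(void)
{
	assert(!xp_unused_options_set(0));		/* legacy eop desc: valid */
	assert(!xp_unused_options_set(XDP_PKT_CONTD));	/* continued frag: valid */
	assert(xp_unused_options_set(1 << 1));		/* unknown bit: rejected */
	return 0;
}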
@@ -360,7 +365,7 @@ static inline void xskq_prod_write_addr_batch(struct xsk_queue *q, struct xdp_de
 }
 static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
-					 u64 addr, u32 len)
+					 u64 addr, u32 len, u32 flags)
 {
 	struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
 	u32 idx;
@@ -372,6 +377,7 @@ static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
 	idx = q->cached_prod++ & q->ring_mask;
 	ring->desc[idx].addr = addr;
 	ring->desc[idx].len = len;
+	ring->desc[idx].options = flags;
 	return 0;
 }
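
With 'flags' plumbed through to the descriptor's 'options' field, a
multi-buffer receive path can mark every fragment but the last. A hedged
sketch of such a caller (hypothetical; the actual multi-buffer RX logic
arrives in later patches of this series):

/* Hypothetical multi-fragment receive helper, for illustration only:
 * every fragment except the last is flagged with XDP_PKT_CONTD, while
 * the last one keeps options == 0 to signal end-of-packet.
 */
static int xsk_rcv_frags(struct xdp_sock *xs, struct xdp_buff **frags,
			 u32 *lens, u32 nr_frags)
{
	u32 i;
	int err;

	for (i = 0; i < nr_frags; i++) {
		u32 flags = (i == nr_frags - 1) ? 0 : XDP_PKT_CONTD;

		err = __xsk_rcv_zc(xs, frags[i], lens[i], flags);
		if (err)
			return err;
	}
	return 0;
}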