Commit 03896ef1 authored by Magnus Karlsson, committed by Alexei Starovoitov

xsk: Change names of validation functions

Change the names of the validation functions to better reflect what
they are doing. The uppermost ones are reading entries from the rings
and only the bottom ones validate entries. So xskq_cons_read_ is a
better prefix name.

Also change the xskq_cons_read_ functions to return a bool
as the descriptor or address is already returned by reference
in the parameters. Everyone is using the return value as a bool
anyway.
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/1576759171-28550-9-git-send-email-magnus.karlsson@intel.com
parent c5ed924b
...@@ -118,7 +118,7 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp); ...@@ -118,7 +118,7 @@ int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs); bool xsk_is_setup_for_bpf_map(struct xdp_sock *xs);
/* Used from netdev driver */ /* Used from netdev driver */
bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt); bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr); bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
void xsk_umem_discard_addr(struct xdp_umem *umem); void xsk_umem_discard_addr(struct xdp_umem *umem);
void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries); void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc); bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
...@@ -197,7 +197,7 @@ static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt) ...@@ -197,7 +197,7 @@ static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
return xsk_umem_has_addrs(umem, cnt - rq->length); return xsk_umem_has_addrs(umem, cnt - rq->length);
} }
static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr) static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
{ {
struct xdp_umem_fq_reuse *rq = umem->fq_reuse; struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
......
...@@ -45,7 +45,7 @@ bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt) ...@@ -45,7 +45,7 @@ bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
} }
EXPORT_SYMBOL(xsk_umem_has_addrs); EXPORT_SYMBOL(xsk_umem_has_addrs);
u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr) bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
{ {
return xskq_cons_peek_addr(umem->fq, addr, umem); return xskq_cons_peek_addr(umem->fq, addr, umem);
} }
...@@ -126,7 +126,7 @@ static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf, ...@@ -126,7 +126,7 @@ static void __xsk_rcv_memcpy(struct xdp_umem *umem, u64 addr, void *from_buf,
void *to_buf = xdp_umem_get_data(umem, addr); void *to_buf = xdp_umem_get_data(umem, addr);
addr = xsk_umem_add_offset_to_addr(addr); addr = xsk_umem_add_offset_to_addr(addr);
if (xskq_crosses_non_contig_pg(umem, addr, len + metalen)) { if (xskq_cons_crosses_non_contig_pg(umem, addr, len + metalen)) {
void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr; void *next_pg_addr = umem->pages[(addr >> PAGE_SHIFT) + 1].addr;
u64 page_start = addr & ~(PAGE_SIZE - 1); u64 page_start = addr & ~(PAGE_SIZE - 1);
u64 first_len = PAGE_SIZE - (addr - page_start); u64 first_len = PAGE_SIZE - (addr - page_start);
......
...@@ -136,8 +136,9 @@ static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt) ...@@ -136,8 +136,9 @@ static inline bool xskq_cons_has_entries(struct xsk_queue *q, u32 cnt)
/* UMEM queue */ /* UMEM queue */
static inline bool xskq_crosses_non_contig_pg(struct xdp_umem *umem, u64 addr, static inline bool xskq_cons_crosses_non_contig_pg(struct xdp_umem *umem,
u64 length) u64 addr,
u64 length)
{ {
bool cross_pg = (addr & (PAGE_SIZE - 1)) + length > PAGE_SIZE; bool cross_pg = (addr & (PAGE_SIZE - 1)) + length > PAGE_SIZE;
bool next_pg_contig = bool next_pg_contig =
...@@ -147,7 +148,7 @@ static inline bool xskq_crosses_non_contig_pg(struct xdp_umem *umem, u64 addr, ...@@ -147,7 +148,7 @@ static inline bool xskq_crosses_non_contig_pg(struct xdp_umem *umem, u64 addr,
return cross_pg && !next_pg_contig; return cross_pg && !next_pg_contig;
} }
static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr) static inline bool xskq_cons_is_valid_addr(struct xsk_queue *q, u64 addr)
{ {
if (addr >= q->size) { if (addr >= q->size) {
q->invalid_descs++; q->invalid_descs++;
...@@ -157,7 +158,8 @@ static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr) ...@@ -157,7 +158,8 @@ static inline bool xskq_is_valid_addr(struct xsk_queue *q, u64 addr)
return true; return true;
} }
static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr, static inline bool xskq_cons_is_valid_unaligned(struct xsk_queue *q,
u64 addr,
u64 length, u64 length,
struct xdp_umem *umem) struct xdp_umem *umem)
{ {
...@@ -165,7 +167,7 @@ static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr, ...@@ -165,7 +167,7 @@ static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr,
addr = xsk_umem_add_offset_to_addr(addr); addr = xsk_umem_add_offset_to_addr(addr);
if (base_addr >= q->size || addr >= q->size || if (base_addr >= q->size || addr >= q->size ||
xskq_crosses_non_contig_pg(umem, addr, length)) { xskq_cons_crosses_non_contig_pg(umem, addr, length)) {
q->invalid_descs++; q->invalid_descs++;
return false; return false;
} }
...@@ -173,8 +175,8 @@ static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr, ...@@ -173,8 +175,8 @@ static inline bool xskq_is_valid_addr_unaligned(struct xsk_queue *q, u64 addr,
return true; return true;
} }
static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr, static inline bool xskq_cons_read_addr(struct xsk_queue *q, u64 *addr,
struct xdp_umem *umem) struct xdp_umem *umem)
{ {
struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring; struct xdp_umem_ring *ring = (struct xdp_umem_ring *)q->ring;
...@@ -184,29 +186,29 @@ static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr, ...@@ -184,29 +186,29 @@ static inline u64 *xskq_validate_addr(struct xsk_queue *q, u64 *addr,
*addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask; *addr = READ_ONCE(ring->desc[idx]) & q->chunk_mask;
if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) { if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
if (xskq_is_valid_addr_unaligned(q, *addr, if (xskq_cons_is_valid_unaligned(q, *addr,
umem->chunk_size_nohr, umem->chunk_size_nohr,
umem)) umem))
return addr; return true;
goto out; goto out;
} }
if (xskq_is_valid_addr(q, *addr)) if (xskq_cons_is_valid_addr(q, *addr))
return addr; return true;
out: out:
q->cached_cons++; q->cached_cons++;
} }
return NULL; return false;
} }
static inline u64 *xskq_cons_peek_addr(struct xsk_queue *q, u64 *addr, static inline bool xskq_cons_peek_addr(struct xsk_queue *q, u64 *addr,
struct xdp_umem *umem) struct xdp_umem *umem)
{ {
if (q->cached_prod == q->cached_cons) if (q->cached_prod == q->cached_cons)
xskq_cons_get_entries(q); xskq_cons_get_entries(q);
return xskq_validate_addr(q, addr, umem); return xskq_cons_read_addr(q, addr, umem);
} }
static inline void xskq_cons_release(struct xsk_queue *q) static inline void xskq_cons_release(struct xsk_queue *q)
...@@ -270,11 +272,12 @@ static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries) ...@@ -270,11 +272,12 @@ static inline void xskq_prod_submit_n(struct xsk_queue *q, u32 nb_entries)
/* Rx/Tx queue */ /* Rx/Tx queue */
static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d, static inline bool xskq_cons_is_valid_desc(struct xsk_queue *q,
struct xdp_umem *umem) struct xdp_desc *d,
struct xdp_umem *umem)
{ {
if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) { if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG) {
if (!xskq_is_valid_addr_unaligned(q, d->addr, d->len, umem)) if (!xskq_cons_is_valid_unaligned(q, d->addr, d->len, umem))
return false; return false;
if (d->len > umem->chunk_size_nohr || d->options) { if (d->len > umem->chunk_size_nohr || d->options) {
...@@ -285,7 +288,7 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d, ...@@ -285,7 +288,7 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d,
return true; return true;
} }
if (!xskq_is_valid_addr(q, d->addr)) if (!xskq_cons_is_valid_addr(q, d->addr))
return false; return false;
if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) || if (((d->addr + d->len) & q->chunk_mask) != (d->addr & q->chunk_mask) ||
...@@ -297,31 +300,31 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d, ...@@ -297,31 +300,31 @@ static inline bool xskq_is_valid_desc(struct xsk_queue *q, struct xdp_desc *d,
return true; return true;
} }
static inline struct xdp_desc *xskq_validate_desc(struct xsk_queue *q, static inline bool xskq_cons_read_desc(struct xsk_queue *q,
struct xdp_desc *desc, struct xdp_desc *desc,
struct xdp_umem *umem) struct xdp_umem *umem)
{ {
while (q->cached_cons != q->cached_prod) { while (q->cached_cons != q->cached_prod) {
struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring; struct xdp_rxtx_ring *ring = (struct xdp_rxtx_ring *)q->ring;
u32 idx = q->cached_cons & q->ring_mask; u32 idx = q->cached_cons & q->ring_mask;
*desc = READ_ONCE(ring->desc[idx]); *desc = READ_ONCE(ring->desc[idx]);
if (xskq_is_valid_desc(q, desc, umem)) if (xskq_cons_is_valid_desc(q, desc, umem))
return desc; return true;
q->cached_cons++; q->cached_cons++;
} }
return NULL; return false;
} }
static inline struct xdp_desc *xskq_cons_peek_desc(struct xsk_queue *q, static inline bool xskq_cons_peek_desc(struct xsk_queue *q,
struct xdp_desc *desc, struct xdp_desc *desc,
struct xdp_umem *umem) struct xdp_umem *umem)
{ {
if (q->cached_prod == q->cached_cons) if (q->cached_prod == q->cached_cons)
xskq_cons_get_entries(q); xskq_cons_get_entries(q);
return xskq_validate_desc(q, desc, umem); return xskq_cons_read_desc(q, desc, umem);
} }
static inline int xskq_prod_reserve_desc(struct xsk_queue *q, static inline int xskq_prod_reserve_desc(struct xsk_queue *q,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment