Commit a71506a4 authored by Magnus Karlsson, committed by Alexei Starovoitov

xsk: Move driver interface to xdp_sock_drv.h

Move the AF_XDP zero-copy driver interface to its own include file
called xdp_sock_drv.h. This should make it clearer to NIC driver
implementers which functions to use for zero-copy support.

v4->v5: Fix -Wmissing-prototypes by including the header file. (Jakub)
Signed-off-by: Magnus Karlsson <magnus.karlsson@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200520192103.355233-4-bjorn.topel@gmail.com
parent d20a1676
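
Before the diff, a quick orientation for driver authors (not part of the patch): a zero-copy RX fill path now pulls everything it needs from <net/xdp_sock_drv.h>. The sketch below is hypothetical; the mydrv_* names are invented for illustration, while the xsk_umem_*/xdp_umem_* calls are exactly the ones this patch moves into the new header.

```c
/* Hypothetical RX fill sketch; mydrv_* names are illustrative only. */
#include <net/xdp_sock_drv.h>

struct mydrv_ring;                                 /* illustrative */
void mydrv_post_rx_buffer(struct mydrv_ring *ring, /* illustrative */
                          dma_addr_t dma, u64 addr);

static u16 mydrv_alloc_rx_buffers_zc(struct mydrv_ring *rx_ring,
                                     struct xdp_umem *umem, u16 count)
{
        u16 filled = 0;
        dma_addr_t dma;
        u64 addr;

        while (filled < count) {
                /* Reuse-queue aware peek: drains recycled addresses
                 * before consuming new ones from the FILL ring.
                 */
                if (!xsk_umem_peek_addr_rq(umem, &addr))
                        break;

                dma = xdp_umem_get_dma(umem, addr);
                mydrv_post_rx_buffer(rx_ring, dma, addr);

                xsk_umem_release_addr_rq(umem);
                filled++;
        }

        if (xsk_umem_uses_need_wakeup(umem)) {
                /* Ask user space to replenish the FILL ring via poll()
                 * if we ran dry, otherwise clear the flag.
                 */
                if (filled < count)
                        xsk_set_rx_need_wakeup(umem);
                else
                        xsk_clear_rx_need_wakeup(umem);
        }

        return filled;
}
```
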
@@ -11,7 +11,7 @@
 #include "i40e_diag.h"
 #include "i40e_xsk.h"
 #include <net/udp_tunnel.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 /* All i40e tracepoints are defined by the include below, which
  * must be included exactly once across the whole kernel with
  * CREATE_TRACE_POINTS defined
...
@@ -2,7 +2,7 @@
 /* Copyright(c) 2018 Intel Corporation. */
 
 #include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xdp.h>
 #include "i40e.h"
...
@@ -2,7 +2,7 @@
 /* Copyright (c) 2019, Intel Corporation. */
 
 #include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xdp.h>
 #include "ice.h"
 #include "ice_base.h"
...
@@ -2,7 +2,7 @@
 /* Copyright(c) 2018 Intel Corporation. */
 
 #include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xdp.h>
 #include "ixgbe.h"
...
@@ -31,7 +31,7 @@
  */
 
 #include <linux/bpf_trace.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include "en/xdp.h"
 #include "en/params.h"
...
@@ -5,7 +5,7 @@
 #define __MLX5_EN_XSK_RX_H__
 
 #include "en.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 
 /* RX data path */
...
@@ -5,7 +5,7 @@
 #define __MLX5_EN_XSK_TX_H__
 
 #include "en.h"
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 
 /* TX data path */
...
 // SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB
 /* Copyright (c) 2019 Mellanox Technologies. */
 
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include "umem.h"
 #include "setup.h"
 #include "en/params.h"
...
@@ -15,6 +15,7 @@
 struct net_device;
 struct xsk_queue;
+struct xdp_buff;
 
 /* Masks for xdp_umem_page flags.
  * The low 12-bits of the addr will be 0 since this is the page address, so we
@@ -101,27 +102,9 @@ struct xdp_sock {
 	spinlock_t map_list_lock;
 };
 
-struct xdp_buff;
 #ifdef CONFIG_XDP_SOCKETS
-int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
-/* Used from netdev driver */
-bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
-bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
-void xsk_umem_release_addr(struct xdp_umem *umem);
-void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
-bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
-void xsk_umem_consume_tx_done(struct xdp_umem *umem);
-struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
-struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
-					  struct xdp_umem_fq_reuse *newq);
-void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
-struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
-void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
-void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
-bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
-
+int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp);
 int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp);
 void __xsk_map_flush(void);
@@ -153,131 +136,24 @@ static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
 	return xsk_umem_extract_addr(addr) + xsk_umem_extract_offset(addr);
 }
 
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
-	unsigned long page_addr;
-
-	addr = xsk_umem_add_offset_to_addr(addr);
-	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
-
-	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
-	addr = xsk_umem_add_offset_to_addr(addr);
-
-	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
-}
-
-/* Reuse-queue aware version of FILL queue helpers */
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
-	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-	if (rq->length >= cnt)
-		return true;
-
-	return xsk_umem_has_addrs(umem, cnt - rq->length);
-}
-
-static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
-	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-	if (!rq->length)
-		return xsk_umem_peek_addr(umem, addr);
-
-	*addr = rq->handles[rq->length - 1];
-	return addr;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-	if (!rq->length)
-		xsk_umem_release_addr(umem);
-	else
-		rq->length--;
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
-	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
-
-	rq->handles[rq->length++] = addr;
-}
-
-/* Handle the offset appropriately depending on aligned or unaligned mode.
- * For unaligned mode, we store the offset in the upper 16-bits of the address.
- * For aligned mode, we simply add the offset to the address.
- */
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
-					 u64 offset)
-{
-	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
-		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
-	else
-		return address + offset;
-}
-
-static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
-{
-	return umem->chunk_size_nohr;
-}
-
 #else
 static inline int xsk_generic_rcv(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
 	return -ENOTSUPP;
 }
 
-static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
-{
-	return false;
-}
-
-static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
-{
-	return NULL;
-}
-
-static inline void xsk_umem_release_addr(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
-{
-}
-
-static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
-				       struct xdp_desc *desc)
-{
-	return false;
-}
-
-static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
-{
-}
-
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
 {
-	return NULL;
+	return -EOPNOTSUPP;
 }
 
-static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
-	struct xdp_umem *umem,
-	struct xdp_umem_fq_reuse *newq)
-{
-	return NULL;
-}
-
-static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+static inline void __xsk_map_flush(void)
 {
 }
 
-static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
-						     u16 queue_id)
+static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
+						     u32 key)
 {
 	return NULL;
 }
@@ -297,80 +173,6 @@ static inline u64 xsk_umem_add_offset_to_addr(u64 addr)
 	return 0;
 }
 
-static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
-{
-	return NULL;
-}
-
-static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
-{
-	return 0;
-}
-
-static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
-{
-	return false;
-}
-
-static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
-{
-	return NULL;
-}
-
-static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
-{
-}
-
-static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
-{
-}
-
-static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
-{
-	return false;
-}
-
-static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
-					 u64 offset)
-{
-	return 0;
-}
-
-static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
-{
-	return 0;
-}
-
-static inline int __xsk_map_redirect(struct xdp_sock *xs, struct xdp_buff *xdp)
-{
-	return -EOPNOTSUPP;
-}
-
-static inline void __xsk_map_flush(void)
-{
-}
-
-static inline struct xdp_sock *__xsk_map_lookup_elem(struct bpf_map *map,
-						     u32 key)
-{
-	return NULL;
-}
-
 #endif /* CONFIG_XDP_SOCKETS */
 
 #endif /* _LINUX_XDP_SOCK_H */
...
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Interface for implementing AF_XDP zero-copy support in drivers.
+ * Copyright(c) 2020 Intel Corporation.
+ */
+
+#ifndef _LINUX_XDP_SOCK_DRV_H
+#define _LINUX_XDP_SOCK_DRV_H
+
+#include <net/xdp_sock.h>
+
+#ifdef CONFIG_XDP_SOCKETS
+
+bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt);
+bool xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr);
+void xsk_umem_release_addr(struct xdp_umem *umem);
+void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries);
+bool xsk_umem_consume_tx(struct xdp_umem *umem, struct xdp_desc *desc);
+void xsk_umem_consume_tx_done(struct xdp_umem *umem);
+struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries);
+struct xdp_umem_fq_reuse *xsk_reuseq_swap(struct xdp_umem *umem,
+					  struct xdp_umem_fq_reuse *newq);
+void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq);
+struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev, u16 queue_id);
+void xsk_set_rx_need_wakeup(struct xdp_umem *umem);
+void xsk_set_tx_need_wakeup(struct xdp_umem *umem);
+void xsk_clear_rx_need_wakeup(struct xdp_umem *umem);
+void xsk_clear_tx_need_wakeup(struct xdp_umem *umem);
+bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem);
+
+static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
+{
+	unsigned long page_addr;
+
+	addr = xsk_umem_add_offset_to_addr(addr);
+	page_addr = (unsigned long)umem->pages[addr >> PAGE_SHIFT].addr;
+
+	return (char *)(page_addr & PAGE_MASK) + (addr & ~PAGE_MASK);
+}
+
+static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
+{
+	addr = xsk_umem_add_offset_to_addr(addr);
+
+	return umem->pages[addr >> PAGE_SHIFT].dma + (addr & ~PAGE_MASK);
+}
+
+/* Reuse-queue aware version of FILL queue helpers */
+static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (rq->length >= cnt)
+		return true;
+
+	return xsk_umem_has_addrs(umem, cnt - rq->length);
+}
+
+static inline bool xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (!rq->length)
+		return xsk_umem_peek_addr(umem, addr);
+
+	*addr = rq->handles[rq->length - 1];
+	return addr;
+}
+
+static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	if (!rq->length)
+		xsk_umem_release_addr(umem);
+	else
+		rq->length--;
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+	struct xdp_umem_fq_reuse *rq = umem->fq_reuse;
+
+	rq->handles[rq->length++] = addr;
+}
+
+/* Handle the offset appropriately depending on aligned or unaligned mode.
+ * For unaligned mode, we store the offset in the upper 16-bits of the address.
+ * For aligned mode, we simply add the offset to the address.
+ */
+static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 address,
+					 u64 offset)
+{
+	if (umem->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG)
+		return address + (offset << XSK_UNALIGNED_BUF_OFFSET_SHIFT);
+	else
+		return address + offset;
+}
+
+static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
+{
+	return umem->chunk_size_nohr;
+}
+
+#else
+
+static inline bool xsk_umem_has_addrs(struct xdp_umem *umem, u32 cnt)
+{
+	return false;
+}
+
+static inline u64 *xsk_umem_peek_addr(struct xdp_umem *umem, u64 *addr)
+{
+	return NULL;
+}
+
+static inline void xsk_umem_release_addr(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_complete_tx(struct xdp_umem *umem, u32 nb_entries)
+{
+}
+
+static inline bool xsk_umem_consume_tx(struct xdp_umem *umem,
+				       struct xdp_desc *desc)
+{
+	return false;
+}
+
+static inline void xsk_umem_consume_tx_done(struct xdp_umem *umem)
+{
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_prepare(u32 nentries)
+{
+	return NULL;
+}
+
+static inline struct xdp_umem_fq_reuse *xsk_reuseq_swap(
+	struct xdp_umem *umem, struct xdp_umem_fq_reuse *newq)
+{
+	return NULL;
+}
+
+static inline void xsk_reuseq_free(struct xdp_umem_fq_reuse *rq)
+{
+}
+
+static inline struct xdp_umem *xdp_get_umem_from_qid(struct net_device *dev,
+						     u16 queue_id)
+{
+	return NULL;
+}
+
+static inline char *xdp_umem_get_data(struct xdp_umem *umem, u64 addr)
+{
+	return NULL;
+}
+
+static inline dma_addr_t xdp_umem_get_dma(struct xdp_umem *umem, u64 addr)
+{
+	return 0;
+}
+
+static inline bool xsk_umem_has_addrs_rq(struct xdp_umem *umem, u32 cnt)
+{
+	return false;
+}
+
+static inline u64 *xsk_umem_peek_addr_rq(struct xdp_umem *umem, u64 *addr)
+{
+	return NULL;
+}
+
+static inline void xsk_umem_release_addr_rq(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_umem_fq_reuse(struct xdp_umem *umem, u64 addr)
+{
+}
+
+static inline void xsk_set_rx_need_wakeup(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_set_tx_need_wakeup(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_clear_rx_need_wakeup(struct xdp_umem *umem)
+{
+}
+
+static inline void xsk_clear_tx_need_wakeup(struct xdp_umem *umem)
+{
+}
+
+static inline bool xsk_umem_uses_need_wakeup(struct xdp_umem *umem)
+{
+	return false;
+}
+
+static inline u64 xsk_umem_adjust_offset(struct xdp_umem *umem, u64 handle,
+					 u64 offset)
+{
+	return 0;
+}
+
+static inline u32 xsk_umem_xdp_frame_sz(struct xdp_umem *umem)
+{
+	return 0;
+}
+
+#endif /* CONFIG_XDP_SOCKETS */
+
+#endif /* _LINUX_XDP_SOCK_DRV_H */
...
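
A worked example (not from the patch) of the address encoding that xsk_umem_adjust_offset() implements: in unaligned-chunk mode the 16-bit offset lives in bits 48-63 of the u64 handle and the base address in bits 0-47, so both survive in a single word; in aligned mode the offset is simply added. The macro values below match the uapi definitions in <linux/if_xdp.h>.

```c
/* Standalone userspace demo of the unaligned-chunk handle encoding. */
#include <stdint.h>
#include <stdio.h>

/* Values match the uapi macros in <linux/if_xdp.h> */
#define XSK_UNALIGNED_BUF_OFFSET_SHIFT 48
#define XSK_UNALIGNED_BUF_ADDR_MASK \
        ((1ULL << XSK_UNALIGNED_BUF_OFFSET_SHIFT) - 1)

int main(void)
{
        uint64_t base = 0x3000; /* chunk start within the umem */
        uint64_t off = 256;     /* headroom offset into the chunk */

        /* Unaligned mode: pack the offset into the upper 16 bits */
        uint64_t handle = base + (off << XSK_UNALIGNED_BUF_OFFSET_SHIFT);

        /* Both parts are recoverable: prints addr = 0x3000, offset = 256 */
        printf("addr = 0x%llx, offset = %llu\n",
               (unsigned long long)(handle & XSK_UNALIGNED_BUF_ADDR_MASK),
               (unsigned long long)(handle >> XSK_UNALIGNED_BUF_OFFSET_SHIFT));

        /* Aligned mode would simply be: handle = base + off */
        return 0;
}
```
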
 // SPDX-License-Identifier: GPL-2.0-only
 
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 
 #include "netlink.h"
 #include "common.h"
...
@@ -24,7 +24,7 @@
 #include <linux/sched/signal.h>
 #include <linux/net.h>
 #include <net/devlink.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/flow_offload.h>
 #include <linux/ethtool_netlink.h>
 #include <generated/utsrelease.h>
...
@@ -6,7 +6,7 @@
 #ifndef XDP_UMEM_H_
 #define XDP_UMEM_H_
 
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 
 int xdp_umem_assign_dev(struct xdp_umem *umem, struct net_device *dev,
 			u16 queue_id, u16 flags);
...
@@ -22,7 +22,7 @@
 #include <linux/net.h>
 #include <linux/netdevice.h>
 #include <linux/rculist.h>
-#include <net/xdp_sock.h>
+#include <net/xdp_sock_drv.h>
 #include <net/xdp.h>
 
 #include "xsk_queue.h"
...
@@ -6,6 +6,7 @@
 #include <linux/log2.h>
 #include <linux/slab.h>
 #include <linux/overflow.h>
+#include <net/xdp_sock_drv.h>
 
 #include "xsk_queue.h"
...
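
For completeness, the TX side of the moved interface: xsk_umem_consume_tx() pops the next descriptor from the AF_XDP TX ring, xsk_umem_consume_tx_done() publishes the consumption back to user space, and xsk_umem_complete_tx() reports hardware-completed buffers to the COMPLETION ring. The sketch below is hypothetical; the mydrv_* names are invented, only the xsk_umem_*/xdp_umem_* calls come from the header this patch introduces.

```c
/* Hypothetical zero-copy TX sketch; mydrv_* names are illustrative only. */
#include <net/xdp_sock_drv.h>

struct mydrv_ring;                               /* illustrative */
void mydrv_post_tx_desc(struct mydrv_ring *ring, /* illustrative */
                        dma_addr_t dma, u32 len);

static bool mydrv_xmit_zc(struct mydrv_ring *tx_ring, struct xdp_umem *umem,
                          unsigned int budget)
{
        struct xdp_desc desc;
        bool work_done = true;
        dma_addr_t dma;

        while (budget-- > 0) {
                /* Pop the next descriptor from the AF_XDP TX ring */
                if (!xsk_umem_consume_tx(umem, &desc)) {
                        work_done = false;
                        break;
                }

                dma = xdp_umem_get_dma(umem, desc.addr);
                mydrv_post_tx_desc(tx_ring, dma, desc.len);
        }

        /* Make the consumed entries visible to user space */
        xsk_umem_consume_tx_done(umem);
        return work_done;
}

/* On TX cleanup, completed buffers go back via the COMPLETION ring */
static void mydrv_clean_tx_zc(struct xdp_umem *umem, u32 completed)
{
        if (completed)
                xsk_umem_complete_tx(umem, completed);
}
```
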