Commit 82c41671 authored by Björn Töpel, committed by Alexei Starovoitov

xdp: Simplify xdp_return_{frame, frame_rx_napi, buff}

The xdp_return_{frame,frame_rx_napi,buff} functions are never used for the
MEM_TYPE_XSK_BUFF_POOL memory type, except in xdp_convert_zc_to_xdp_frame().

To simplify and reduce code, change xdp_convert_zc_to_xdp_frame() to call
xsk_buff_free() directly, since the memory type is known there, and remove
MEM_TYPE_XSK_BUFF_POOL from the switch statement in the __xdp_return()
function.
Suggested-by: Maxim Mikityanskiy <maximmi@mellanox.com>
Signed-off-by: Björn Töpel <bjorn.topel@intel.com>
Signed-off-by: Alexei Starovoitov <ast@kernel.org>
Link: https://lore.kernel.org/bpf/20200520192103.355233-14-bjorn.topel@gmail.com
parent 0807892e
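For context, the exported helpers keep their one-argument signatures after this change; only the internal __xdp_return() loses its xdp_buff parameter. Below is a minimal, hypothetical driver-side sketch (the mydrv_* names and the ring layout are assumptions, not part of this patch) of returning an xdp_frame on TX completion:

#include <net/xdp.h>

/* Hypothetical driver state; only the completed frame matters here. */
struct mydrv_tx_ring {
	struct xdp_frame *completed_frame;
};

static void mydrv_clean_xdp_tx(struct mydrv_tx_ring *ring)
{
	struct xdp_frame *xdpf = ring->completed_frame;

	if (!xdpf)
		return;

	/* Running outside the driver's own NAPI poll in this sketch, so use
	 * the plain variant; xdp_return_frame_rx_napi() is the faster choice
	 * when called from NAPI context.
	 */
	xdp_return_frame(xdpf);
	ring->completed_frame = NULL;
}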
@@ -335,10 +335,11 @@ EXPORT_SYMBOL_GPL(xdp_rxq_info_reg_mem_model);
  * scenarios (e.g. queue full), it is possible to return the xdp_frame
  * while still leveraging this protection. The @napi_direct boolean
  * is used for those calls sites. Thus, allowing for faster recycling
- * of xdp_frames/pages in those cases.
+ * of xdp_frames/pages in those cases. This path is never used by the
+ * MEM_TYPE_XSK_BUFF_POOL memory type, so it's explicitly not part of
+ * the switch-statement.
  */
-static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
-			 struct xdp_buff *xdp)
+static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct)
 {
 	struct xdp_mem_allocator *xa;
 	struct page *page;
@@ -360,33 +361,29 @@ static void __xdp_return(void *data, struct xdp_mem_info *mem, bool napi_direct,
 		page = virt_to_page(data); /* Assumes order0 page*/
 		put_page(page);
 		break;
-	case MEM_TYPE_XSK_BUFF_POOL:
-		/* NB! Only valid from an xdp_buff! */
-		xsk_buff_free(xdp);
-		break;
 	default:
 		/* Not possible, checked in xdp_rxq_info_reg_mem_model() */
+		WARN(1, "Incorrect XDP memory type (%d) usage", mem->type);
 		break;
 	}
 }
 
 void xdp_return_frame(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, false, NULL);
+	__xdp_return(xdpf->data, &xdpf->mem, false);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame);
 
 void xdp_return_frame_rx_napi(struct xdp_frame *xdpf)
 {
-	__xdp_return(xdpf->data, &xdpf->mem, true, NULL);
+	__xdp_return(xdpf->data, &xdpf->mem, true);
 }
 EXPORT_SYMBOL_GPL(xdp_return_frame_rx_napi);
 
 void xdp_return_buff(struct xdp_buff *xdp)
 {
-	__xdp_return(xdp->data, &xdp->rxq->mem, true, xdp);
+	__xdp_return(xdp->data, &xdp->rxq->mem, true);
 }
-EXPORT_SYMBOL_GPL(xdp_return_buff);
 
 /* Only called for MEM_TYPE_PAGE_POOL see xdp.h */
 void __xdp_release_frame(void *data, struct xdp_mem_info *mem)
@@ -467,7 +464,7 @@ struct xdp_frame *xdp_convert_zc_to_xdp_frame(struct xdp_buff *xdp)
 	xdpf->metasize = metasize;
 	xdpf->mem.type = MEM_TYPE_PAGE_ORDER0;
 
-	xdp_return_buff(xdp);
+	xsk_buff_free(xdp);
 	return xdpf;
 }
 EXPORT_SYMBOL_GPL(xdp_convert_zc_to_xdp_frame);