Commit d000574d authored by David Arinzon, committed by Jakub Kicinski

net: ena: Move XDP code to its new files

The XDP code has a very large footprint in the driver's overall code,
which makes the rest of the driver much harder to read.

Move the XDP code to dedicated files.

This patch doesn't change the code itself; it only cut-pastes the code
into the new ena_xdp.c and ena_xdp.h files, so the change is purely
cosmetic.
Signed-off-by: Shay Agroskin <shayagr@amazon.com>
Signed-off-by: David Arinzon <darinzon@amazon.com>
Link: https://lore.kernel.org/r/20240101190855.18739-2-darinzon@amazon.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 4ebb1f95
@@ -54,6 +54,7 @@ ena_common_defs.h Common definitions for ena_com layer.
ena_regs_defs.h Definition of ENA PCI memory-mapped (MMIO) registers.
ena_netdev.[ch] Main Linux kernel driver.
ena_ethtool.c ethtool callbacks.
ena_xdp.[ch] XDP files.
ena_pci_id_tbl.h Supported device IDs.
================= ======================================================
@@ -5,4 +5,4 @@
obj-$(CONFIG_ENA_ETHERNET) += ena.o
ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o
ena-y := ena_netdev.o ena_com.o ena_eth_com.o ena_ethtool.o ena_xdp.o
@@ -7,6 +7,7 @@
#include <linux/pci.h>

#include "ena_netdev.h"
#include "ena_xdp.h"

struct ena_stats {
	char name[ETH_GSTRING_LEN];
@@ -110,19 +110,6 @@
#define ENA_MMIO_DISABLE_REG_READ BIT(0)
/* The max MTU size is configured to be the ethernet frame size without
 * the overhead of the ethernet header, which can have a VLAN header, and
 * a frame check sequence (FCS).
 * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
 */
#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
			 VLAN_HLEN - XDP_PACKET_HEADROOM - \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))

#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
	((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))

struct ena_irq {
	irq_handler_t handler;
	void *data;
@@ -421,47 +408,44 @@ static inline void ena_reset_device(struct ena_adapter *adapter,
	set_bit(ENA_FLAG_TRIGGER_RESET, &adapter->flags);
}

enum ena_xdp_errors_t {
	ENA_XDP_ALLOWED = 0,
	ENA_XDP_CURRENT_MTU_TOO_LARGE,
	ENA_XDP_NO_ENOUGH_QUEUES,
};

enum ENA_XDP_ACTIONS {
	ENA_XDP_PASS = 0,
	ENA_XDP_TX = BIT(0),
	ENA_XDP_REDIRECT = BIT(1),
	ENA_XDP_DROP = BIT(2)
};

#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)

static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
	return !!adapter->xdp_bpf_prog;
}

static inline bool ena_xdp_present_ring(struct ena_ring *ring)
{
	return !!ring->xdp_bpf_prog;
}

int handle_invalid_req_id(struct ena_ring *ring, u16 req_id,
			  struct ena_tx_buffer *tx_info, bool is_xdp);

static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
					     u32 queues)
/* Increase a stat by cnt while holding syncp seqlock on 32bit machines */
static inline void ena_increase_stat(u64 *statp, u64 cnt,
				     struct u64_stats_sync *syncp)
{
	return 2 * queues <= adapter->max_num_io_queues;
	u64_stats_update_begin(syncp);
	(*statp) += cnt;
	u64_stats_update_end(syncp);
}
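/* Illustrative reader side (not part of this patch): ena_increase_stat()
 * above is the writer half of the u64_stats protocol; on 32-bit kernels a
 * reader must retry if it races with an update. A minimal sketch reading a
 * syncp-protected counter:
 *
 *	unsigned int start;
 *	u64 doorbells;
 *
 *	do {
 *		start = u64_stats_fetch_begin(&tx_ring->syncp);
 *		doorbells = tx_ring->tx_stats.doorbells;
 *	} while (u64_stats_fetch_retry(&tx_ring->syncp, start));
 */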
static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
static inline void ena_ring_tx_doorbell(struct ena_ring *tx_ring)
{
	enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;

	if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
		rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
	else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
		rc = ENA_XDP_NO_ENOUGH_QUEUES;

	return rc;
	ena_com_write_sq_doorbell(tx_ring->ena_com_io_sq);
	ena_increase_stat(&tx_ring->tx_stats.doorbells, 1, &tx_ring->syncp);
}
int ena_xmit_common(struct net_device *dev,
		    struct ena_ring *ring,
		    struct ena_tx_buffer *tx_info,
		    struct ena_com_tx_ctx *ena_tx_ctx,
		    u16 next_to_use,
		    u32 bytes);
void ena_unmap_tx_buff(struct ena_ring *tx_ring,
		       struct ena_tx_buffer *tx_info);
void ena_init_io_rings(struct ena_adapter *adapter,
		       int first_index, int count);
int ena_create_io_tx_queues_in_range(struct ena_adapter *adapter,
				     int first_index, int count);
int ena_setup_tx_resources_in_range(struct ena_adapter *adapter,
				    int first_index, int count);
void ena_free_all_io_tx_resources_in_range(struct ena_adapter *adapter,
					   int first_index, int count);
void ena_free_all_io_tx_resources(struct ena_adapter *adapter);
void ena_down(struct ena_adapter *adapter);
int ena_up(struct ena_adapter *adapter);
void ena_unmask_interrupt(struct ena_ring *tx_ring, struct ena_ring *rx_ring);
void ena_update_ring_numa_node(struct ena_ring *tx_ring,
			       struct ena_ring *rx_ring);

#endif /* !(ENA_H) */
(The diff of the new file ena_xdp.c is collapsed in this view.)
/* SPDX-License-Identifier: GPL-2.0 OR Linux-OpenIB */
/*
 * Copyright 2015-2021 Amazon.com, Inc. or its affiliates. All rights reserved.
 */

#ifndef ENA_XDP_H
#define ENA_XDP_H

#include "ena_netdev.h"
#include <linux/bpf_trace.h>

/* The max MTU size is configured to be the ethernet frame size without
 * the overhead of the ethernet header, which can have a VLAN header, and
 * a frame check sequence (FCS).
 * The buffer size we share with the device is defined to be ENA_PAGE_SIZE
 */
#define ENA_XDP_MAX_MTU (ENA_PAGE_SIZE - ETH_HLEN - ETH_FCS_LEN - \
			 VLAN_HLEN - XDP_PACKET_HEADROOM - \
			 SKB_DATA_ALIGN(sizeof(struct skb_shared_info)))
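/* Worked example (illustrative, not part of the patch): assuming a 4 KiB
 * ENA_PAGE_SIZE, the default 256-byte XDP_PACKET_HEADROOM and a 320-byte
 * aligned skb_shared_info (typical on x86-64), the macro evaluates to
 *
 *	4096 - 14 (ETH_HLEN) - 4 (ETH_FCS_LEN) - 4 (VLAN_HLEN)
 *	     - 256 - 320 = 3498 bytes
 */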
#define ENA_IS_XDP_INDEX(adapter, index) (((index) >= (adapter)->xdp_first_ring) && \
	((index) < (adapter)->xdp_first_ring + (adapter)->xdp_num_queues))
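/* Illustrative only (not part of the patch): assuming the XDP TX rings are
 * laid out right after the regular IO rings, e.g. xdp_first_ring == 8 and
 * xdp_num_queues == 8, ring indices 8..15 are XDP rings:
 *
 *	ENA_IS_XDP_INDEX(adapter, 10);	// true
 *	ENA_IS_XDP_INDEX(adapter, 3);	// false
 */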
enum ENA_XDP_ACTIONS {
	ENA_XDP_PASS = 0,
	ENA_XDP_TX = BIT(0),
	ENA_XDP_REDIRECT = BIT(1),
	ENA_XDP_DROP = BIT(2)
};

#define ENA_XDP_FORWARDED (ENA_XDP_TX | ENA_XDP_REDIRECT)
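/* Illustrative only (not part of the patch): because the verdicts are bit
 * flags, callers can test a whole group with one mask instead of comparing
 * individual values:
 *
 *	u32 verdict = ena_xdp_execute(rx_ring, xdp);
 *
 *	if (verdict & ENA_XDP_FORWARDED)
 *		return;	// buffer consumed by an XDP_TX or XDP_REDIRECT path
 */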
int ena_setup_and_create_all_xdp_queues(struct ena_adapter *adapter);
void ena_xdp_exchange_program_rx_in_range(struct ena_adapter *adapter,
					  struct bpf_prog *prog,
					  int first, int count);
int ena_xdp_io_poll(struct napi_struct *napi, int budget);
int ena_xdp_xmit_frame(struct ena_ring *xdp_ring,
		       struct net_device *dev,
		       struct xdp_frame *xdpf,
		       int flags);
int ena_xdp_xmit(struct net_device *dev, int n,
		 struct xdp_frame **frames, u32 flags);
int ena_xdp(struct net_device *netdev, struct netdev_bpf *bpf);

enum ena_xdp_errors_t {
	ENA_XDP_ALLOWED = 0,
	ENA_XDP_CURRENT_MTU_TOO_LARGE,
	ENA_XDP_NO_ENOUGH_QUEUES,
};
static inline bool ena_xdp_present(struct ena_adapter *adapter)
{
	return !!adapter->xdp_bpf_prog;
}

static inline bool ena_xdp_present_ring(struct ena_ring *ring)
{
	return !!ring->xdp_bpf_prog;
}

static inline bool ena_xdp_legal_queue_count(struct ena_adapter *adapter,
					     u32 queues)
{
	return 2 * queues <= adapter->max_num_io_queues;
}

static inline enum ena_xdp_errors_t ena_xdp_allowed(struct ena_adapter *adapter)
{
	enum ena_xdp_errors_t rc = ENA_XDP_ALLOWED;

	if (adapter->netdev->mtu > ENA_XDP_MAX_MTU)
		rc = ENA_XDP_CURRENT_MTU_TOO_LARGE;
	else if (!ena_xdp_legal_queue_count(adapter, adapter->num_io_queues))
		rc = ENA_XDP_NO_ENOUGH_QUEUES;

	return rc;
}
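/* Illustrative caller (not part of the patch): a setup path would typically
 * validate the configuration before attaching a program. The queue check
 * reflects that each channel needs a dedicated XDP TX queue next to its
 * regular queue, so only half of max_num_io_queues remains usable:
 *
 *	switch (ena_xdp_allowed(adapter)) {
 *	case ENA_XDP_ALLOWED:
 *		break;
 *	case ENA_XDP_CURRENT_MTU_TOO_LARGE:
 *		netdev_err(netdev, "MTU %d is larger than ENA_XDP_MAX_MTU\n",
 *			   netdev->mtu);
 *		return -EINVAL;
 *	case ENA_XDP_NO_ENOUGH_QUEUES:
 *		netdev_err(netdev, "Not enough queues to double for XDP\n");
 *		return -EINVAL;
 *	}
 */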
static inline int ena_xdp_execute(struct ena_ring *rx_ring, struct xdp_buff *xdp)
{
	u32 verdict = ENA_XDP_PASS;
	struct bpf_prog *xdp_prog;
	struct ena_ring *xdp_ring;
	struct xdp_frame *xdpf;
	u64 *xdp_stat;

	xdp_prog = READ_ONCE(rx_ring->xdp_bpf_prog);

	if (!xdp_prog)
		return verdict;

	verdict = bpf_prog_run_xdp(xdp_prog, xdp);

	switch (verdict) {
	case XDP_TX:
		xdpf = xdp_convert_buff_to_frame(xdp);
		if (unlikely(!xdpf)) {
			trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
			xdp_stat = &rx_ring->rx_stats.xdp_aborted;
			verdict = ENA_XDP_DROP;
			break;
		}

		/* Find xmit queue */
		xdp_ring = rx_ring->xdp_ring;

		/* The XDP queues are shared between XDP_TX and XDP_REDIRECT */
		spin_lock(&xdp_ring->xdp_tx_lock);

		if (ena_xdp_xmit_frame(xdp_ring, rx_ring->netdev, xdpf,
				       XDP_XMIT_FLUSH))
			xdp_return_frame(xdpf);

		spin_unlock(&xdp_ring->xdp_tx_lock);
		xdp_stat = &rx_ring->rx_stats.xdp_tx;
		verdict = ENA_XDP_TX;
		break;
	case XDP_REDIRECT:
		if (likely(!xdp_do_redirect(rx_ring->netdev, xdp, xdp_prog))) {
			xdp_stat = &rx_ring->rx_stats.xdp_redirect;
			verdict = ENA_XDP_REDIRECT;
			break;
		}

		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_ABORTED:
		trace_xdp_exception(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_aborted;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_DROP:
		xdp_stat = &rx_ring->rx_stats.xdp_drop;
		verdict = ENA_XDP_DROP;
		break;
	case XDP_PASS:
		xdp_stat = &rx_ring->rx_stats.xdp_pass;
		verdict = ENA_XDP_PASS;
		break;
	default:
		bpf_warn_invalid_xdp_action(rx_ring->netdev, xdp_prog, verdict);
		xdp_stat = &rx_ring->rx_stats.xdp_invalid;
		verdict = ENA_XDP_DROP;
	}

	ena_increase_stat(xdp_stat, 1, &rx_ring->syncp);

	return verdict;
}
#endif /* ENA_XDP_H */
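
For orientation (not part of the patch), here is a minimal, hypothetical sketch of how an RX path can consume the verdict mask returned by ena_xdp_execute(); descriptor refill and page accounting are elided, and recycle_rx_page()/pass_to_stack() are made-up helper names:

	static void ena_rx_xdp_sketch(struct ena_ring *rx_ring, struct xdp_buff *xdp)
	{
		int verdict = ena_xdp_execute(rx_ring, xdp);

		if (verdict & ENA_XDP_FORWARDED)
			return;		/* buffer ownership moved to a TX/REDIRECT path */

		if (verdict & ENA_XDP_DROP)
			recycle_rx_page(rx_ring, xdp);	/* hypothetical helper */
		else					/* ENA_XDP_PASS */
			pass_to_stack(rx_ring, xdp);	/* hypothetical helper */
	}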