Commit 43a71cd6 authored by Coco Li, committed by Jakub Kicinski

net-device: reorganize net_device fast path variables

Reorganize fast path variables in tx-txrx-rx order.
The fast path variables end after npinfo.

The data below were generated with pahole on the x86 architecture.

Cache lines spanned by fast path variables before the change: 12
Cache lines spanned by fast path variables after the change: 4
Suggested-by: Eric Dumazet <edumazet@google.com>
Signed-off-by: Coco Li <lixiaoyan@google.com>
Reviewed-by: Eric Dumazet <edumazet@google.com>
Reviewed-by: David Ahern <dsahern@kernel.org>
Link: https://lore.kernel.org/r/20231204201232.520025-2-lixiaoyan@google.com
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 5aa00e9e
include/linux/netdevice.h

@@ -2097,6 +2097,70 @@ enum netdev_stat_type {
  */
 struct net_device {
+	/* Cacheline organization can be found documented in
+	 * Documentation/networking/net_cachelines/net_device.rst.
+	 * Please update the document when adding new fields.
+	 */
+
+	/* TX read-mostly hotpath */
+	__cacheline_group_begin(net_device_read_tx);
+	unsigned long long priv_flags;
+	const struct net_device_ops *netdev_ops;
+	const struct header_ops *header_ops;
+	struct netdev_queue *_tx;
+	unsigned int real_num_tx_queues;
+	unsigned int gso_max_size;
+	unsigned int gso_ipv4_max_size;
+	u16 gso_max_segs;
+	s16 num_tc;
+	/* Note : dev->mtu is often read without holding a lock.
+	 * Writers usually hold RTNL.
+	 * It is recommended to use READ_ONCE() to annotate the reads,
+	 * and to use WRITE_ONCE() to annotate the writes.
+	 */
+	unsigned int mtu;
+	unsigned short needed_headroom;
+	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
+#ifdef CONFIG_XPS
+	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+	struct nf_hook_entries __rcu *nf_hooks_egress;
+#endif
+#ifdef CONFIG_NET_XGRESS
+	struct bpf_mprog_entry __rcu *tcx_egress;
+#endif
+	__cacheline_group_end(net_device_read_tx);
+
+	/* TXRX read-mostly hotpath */
+	__cacheline_group_begin(net_device_read_txrx);
+	unsigned int flags;
+	unsigned short hard_header_len;
+	netdev_features_t features;
+	struct inet6_dev __rcu *ip6_ptr;
+	__cacheline_group_end(net_device_read_txrx);
+
+	/* RX read-mostly hotpath */
+	__cacheline_group_begin(net_device_read_rx);
+	struct list_head ptype_specific;
+	int ifindex;
+	unsigned int real_num_rx_queues;
+	struct netdev_rx_queue *_rx;
+	unsigned long gro_flush_timeout;
+	int napi_defer_hard_irqs;
+	unsigned int gro_max_size;
+	unsigned int gro_ipv4_max_size;
+	rx_handler_func_t __rcu *rx_handler;
+	void __rcu *rx_handler_data;
+	possible_net_t nd_net;
+#ifdef CONFIG_NETPOLL
+	struct netpoll_info __rcu *npinfo;
+#endif
+#ifdef CONFIG_NET_XGRESS
+	struct bpf_mprog_entry __rcu *tcx_ingress;
+#endif
+	__cacheline_group_end(net_device_read_rx);
+
 	char name[IFNAMSIZ];
 	struct netdev_name_node *name_node;
 	struct dev_ifalias __rcu *ifalias;
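
The __cacheline_group_begin()/__cacheline_group_end() pairs above are zero-size marker members: they delimit a named span of fields without adding any bytes to the struct. A minimal sketch of the helpers, based on the include/linux/cache.h change earlier in this series (see that header for the exact definitions):

	#define __cacheline_group_begin(GROUP) \
		__u8 __cacheline_group_begin__##GROUP[0]

	#define __cacheline_group_end(GROUP) \
		__u8 __cacheline_group_end__##GROUP[0]

Because both expand to zero-length arrays, they cost nothing at runtime; their offsets simply let offsetof()-based assertions verify that every hot field landed inside its intended group (see net_dev_struct_check() below).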
@@ -2121,7 +2185,6 @@ struct net_device {
 	struct list_head unreg_list;
 	struct list_head close_list;
 	struct list_head ptype_all;
-	struct list_head ptype_specific;
 
 	struct {
 		struct list_head upper;
@@ -2129,26 +2192,13 @@ struct net_device {
 	} adj_list;
 
 	/* Read-mostly cache-line for fast-path access */
-	unsigned int flags;
 	xdp_features_t xdp_features;
-	unsigned long long priv_flags;
-	const struct net_device_ops *netdev_ops;
 	const struct xdp_metadata_ops *xdp_metadata_ops;
 	const struct xsk_tx_metadata_ops *xsk_tx_metadata_ops;
-	int ifindex;
 	unsigned short gflags;
-	unsigned short hard_header_len;
-	/* Note : dev->mtu is often read without holding a lock.
-	 * Writers usually hold RTNL.
-	 * It is recommended to use READ_ONCE() to annotate the reads,
-	 * and to use WRITE_ONCE() to annotate the writes.
-	 */
-	unsigned int mtu;
-	unsigned short needed_headroom;
 	unsigned short needed_tailroom;
-	netdev_features_t features;
 	netdev_features_t hw_features;
 	netdev_features_t wanted_features;
 	netdev_features_t vlan_features;
@@ -2192,8 +2242,6 @@ struct net_device {
 	const struct tlsdev_ops *tlsdev_ops;
 #endif
 
-	const struct header_ops *header_ops;
-
 	unsigned char operstate;
 	unsigned char link_mode;
@@ -2234,9 +2282,7 @@ struct net_device {
 	/* Protocol-specific pointers */
 	struct in_device __rcu *ip_ptr;
-	struct inet6_dev __rcu *ip6_ptr;
-
 #if IS_ENABLED(CONFIG_VLAN_8021Q)
 	struct vlan_info __rcu *vlan_info;
 #endif
@@ -2271,26 +2317,14 @@ struct net_device {
 	/* Interface address info used in eth_type_trans() */
 	const unsigned char *dev_addr;
-	struct netdev_rx_queue *_rx;
 	unsigned int num_rx_queues;
-	unsigned int real_num_rx_queues;
 	struct bpf_prog __rcu *xdp_prog;
-	unsigned long gro_flush_timeout;
-	int napi_defer_hard_irqs;
 #define GRO_LEGACY_MAX_SIZE 65536u
 /* TCP minimal MSS is 8 (TCP_MIN_GSO_SIZE),
  * and shinfo->gso_segs is a 16bit field.
  */
 #define GRO_MAX_SIZE (8 * 65535u)
-	unsigned int gro_max_size;
-	unsigned int gro_ipv4_max_size;
 	unsigned int xdp_zc_max_segs;
-	rx_handler_func_t __rcu *rx_handler;
-	void __rcu *rx_handler_data;
-
-#ifdef CONFIG_NET_XGRESS
-	struct bpf_mprog_entry __rcu *tcx_ingress;
-#endif
 	struct netdev_queue __rcu *ingress_queue;
#ifdef CONFIG_NETFILTER_INGRESS
 	struct nf_hook_entries __rcu *nf_hooks_ingress;
@@ -2305,25 +2339,13 @@ struct net_device {
 /*
  * Cache lines mostly used on transmit path
  */
-	struct netdev_queue *_tx ____cacheline_aligned_in_smp;
 	unsigned int num_tx_queues;
-	unsigned int real_num_tx_queues;
 	struct Qdisc __rcu *qdisc;
 	unsigned int tx_queue_len;
 	spinlock_t tx_global_lock;
 	struct xdp_dev_bulk_queue __percpu *xdp_bulkq;
-
-#ifdef CONFIG_XPS
-	struct xps_dev_maps __rcu *xps_maps[XPS_MAPS_MAX];
-#endif
-#ifdef CONFIG_NET_XGRESS
-	struct bpf_mprog_entry __rcu *tcx_egress;
-#endif
-#ifdef CONFIG_NETFILTER_EGRESS
-	struct nf_hook_entries __rcu *nf_hooks_egress;
-#endif
 #ifdef CONFIG_NET_SCHED
 	DECLARE_HASHTABLE (qdisc_hash, 4);
 #endif
@@ -2362,12 +2384,6 @@ struct net_device {
 	bool needs_free_netdev;
 	void (*priv_destructor)(struct net_device *dev);
-#ifdef CONFIG_NETPOLL
-	struct netpoll_info __rcu *npinfo;
-#endif
-
-	possible_net_t nd_net;
-
 	/* mid-layer private */
 	void *ml_priv;
 	enum netdev_ml_priv_type ml_priv_type;
@@ -2402,20 +2418,15 @@ struct net_device {
  */
 #define GSO_MAX_SIZE (8 * GSO_MAX_SEGS)
-	unsigned int gso_max_size;
 #define TSO_LEGACY_MAX_SIZE 65536
 #define TSO_MAX_SIZE UINT_MAX
 	unsigned int tso_max_size;
-	u16 gso_max_segs;
 #define TSO_MAX_SEGS U16_MAX
 	u16 tso_max_segs;
-	unsigned int gso_ipv4_max_size;
 
 #ifdef CONFIG_DCB
 	const struct dcbnl_rtnl_ops *dcbnl_ops;
 #endif
-	s16 num_tc;
-	struct netdev_tc_txq tc_to_txq[TC_MAX_QUEUE];
 	u8 prio_tc_map[TC_BITMASK + 1];
 
 #if IS_ENABLED(CONFIG_FCOE)
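The mtu comment carried into the TX group above prescribes READ_ONCE()/WRITE_ONCE() annotations for lockless access. A minimal sketch of that pattern, with illustrative helper names that are not part of this patch:

	/* Hypothetical helpers showing the annotation pattern from the
	 * dev->mtu comment; not code from this commit.
	 */
	static unsigned int example_read_mtu(const struct net_device *dev)
	{
		/* Readers often run without RTNL, so annotate the load. */
		return READ_ONCE(dev->mtu);
	}

	static void example_write_mtu(struct net_device *dev, unsigned int mtu)
	{
		/* Writers usually hold RTNL; annotating the store keeps
		 * lockless readers data-race free.
		 */
		WRITE_ONCE(dev->mtu, mtu);
	}
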
net/core/dev.c

@@ -11609,6 +11609,60 @@ static struct pernet_operations __net_initdata default_device_ops = {
 	.exit_batch = default_device_exit_batch,
 };
 
+static void __init net_dev_struct_check(void)
+{
+	/* TX read-mostly hotpath */
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, priv_flags);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, netdev_ops);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, header_ops);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, _tx);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, real_num_tx_queues);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_size);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_ipv4_max_size);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, gso_max_segs);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, num_tc);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, mtu);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, needed_headroom);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tc_to_txq);
+#ifdef CONFIG_XPS
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, xps_maps);
+#endif
+#ifdef CONFIG_NETFILTER_EGRESS
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, nf_hooks_egress);
+#endif
+#ifdef CONFIG_NET_XGRESS
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_tx, tcx_egress);
+#endif
+	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_tx, 152);
+
+	/* TXRX read-mostly hotpath */
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, flags);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, hard_header_len);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, features);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_txrx, ip6_ptr);
+	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_txrx, 30);
+
+	/* RX read-mostly hotpath */
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ptype_specific);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, ifindex);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, real_num_rx_queues);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, _rx);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_flush_timeout);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, napi_defer_hard_irqs);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_max_size);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, gro_ipv4_max_size);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, rx_handler_data);
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, nd_net);
+#ifdef CONFIG_NETPOLL
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, npinfo);
+#endif
+#ifdef CONFIG_NET_XGRESS
+	CACHELINE_ASSERT_GROUP_MEMBER(struct net_device, net_device_read_rx, tcx_ingress);
+#endif
+	CACHELINE_ASSERT_GROUP_SIZE(struct net_device, net_device_read_rx, 96);
+}
+
 /*
  * Initialize the DEV module. At boot time this walks the device list and
  * unhooks any devices that fail to initialise (normally hardware not
@@ -11626,6 +11680,8 @@ static int __init net_dev_init(void)
 
 	BUG_ON(!dev_boot_phase);
 
+	net_dev_struct_check();
+
 	if (dev_proc_init())
 		goto out;
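The CACHELINE_ASSERT_GROUP_MEMBER()/CACHELINE_ASSERT_GROUP_SIZE() checks in net_dev_struct_check() are compile-time assertions built on BUILD_BUG_ON(), so the function generates no runtime work; calling it from net_dev_init() just anchors it in the build. A sketch of how the helpers are built, based on the include/linux/cache.h change earlier in this series (see that header for the authoritative definitions):

	/* Member must sit between the group's begin and end markers. */
	#define CACHELINE_ASSERT_GROUP_MEMBER(TYPE, GROUP, MEMBER) \
		BUILD_BUG_ON(!(offsetof(TYPE, MEMBER) >= \
			       offsetofend(TYPE, __cacheline_group_begin__##GROUP) && \
			       offsetofend(TYPE, MEMBER) <= \
			       offsetof(TYPE, __cacheline_group_end__##GROUP)))

	/* The whole group must not exceed SIZE bytes. */
	#define CACHELINE_ASSERT_GROUP_SIZE(TYPE, GROUP, SIZE) \
		BUILD_BUG_ON(offsetof(TYPE, __cacheline_group_end__##GROUP) - \
			     offsetofend(TYPE, __cacheline_group_begin__##GROUP) > \
			     SIZE)

If a later patch moves a field out of its group or grows a group past its budget (152 bytes for TX, 30 for TXRX, 96 for RX here), the build fails rather than silently regressing cache locality.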