Commit ba89966c authored by Eric Dumazet, committed by David S. Miller

[NET]: use __read_mostly on kmem_cache_t , DEFINE_SNMP_STAT pointers

This patch puts mostly read only data in the right section
(read_mostly), to help sharing of these data between CPUS without
memory ping pongs.

On one of my production machine, tcp_statistics was sitting in a
heavily modified cache line, so *every* SNMP update had to force a
reload.
Signed-off-by: Eric Dumazet <dada1@cosmosbay.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 29cb9f9c
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
#include <asm/atomic.h> #include <asm/atomic.h>
#include "br_private.h" #include "br_private.h"
static kmem_cache_t *br_fdb_cache; static kmem_cache_t *br_fdb_cache __read_mostly;
static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source, static int fdb_insert(struct net_bridge *br, struct net_bridge_port *source,
const unsigned char *addr); const unsigned char *addr);
......
...@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL }; ...@@ -42,7 +42,7 @@ static DEFINE_PER_CPU(struct flow_cache_entry **, flow_tables) = { NULL };
#define flow_table(cpu) (per_cpu(flow_tables, cpu)) #define flow_table(cpu) (per_cpu(flow_tables, cpu))
static kmem_cache_t *flow_cachep; static kmem_cache_t *flow_cachep __read_mostly;
static int flow_lwm, flow_hwm; static int flow_lwm, flow_hwm;
......
...@@ -68,8 +68,8 @@ ...@@ -68,8 +68,8 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/system.h> #include <asm/system.h>
static kmem_cache_t *skbuff_head_cache; static kmem_cache_t *skbuff_head_cache __read_mostly;
static kmem_cache_t *skbuff_fclone_cache; static kmem_cache_t *skbuff_fclone_cache __read_mostly;
struct timeval __read_mostly skb_tv_base; struct timeval __read_mostly skb_tv_base;
......
...@@ -85,7 +85,7 @@ static int ccid3_debug; ...@@ -85,7 +85,7 @@ static int ccid3_debug;
static struct dccp_tx_hist *ccid3_tx_hist; static struct dccp_tx_hist *ccid3_tx_hist;
static struct dccp_rx_hist *ccid3_rx_hist; static struct dccp_rx_hist *ccid3_rx_hist;
static kmem_cache_t *ccid3_loss_interval_hist_slab; static kmem_cache_t *ccid3_loss_interval_hist_slab __read_mostly;
static inline struct ccid3_loss_interval_hist_entry * static inline struct ccid3_loss_interval_hist_entry *
ccid3_loss_interval_hist_entry_new(const unsigned int __nocast prio) ccid3_loss_interval_hist_entry_new(const unsigned int __nocast prio)
......
...@@ -39,7 +39,7 @@ ...@@ -39,7 +39,7 @@
#include "ccid.h" #include "ccid.h"
#include "dccp.h" #include "dccp.h"
DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics); DEFINE_SNMP_STAT(struct dccp_mib, dccp_statistics) __read_mostly;
atomic_t dccp_orphan_count = ATOMIC_INIT(0); atomic_t dccp_orphan_count = ATOMIC_INIT(0);
......
...@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n ...@@ -79,7 +79,7 @@ for( ; ((f) = *(fp)) != NULL && dn_key_eq((f)->fn_key, (key)); (fp) = &(f)->fn_n
static DEFINE_RWLOCK(dn_fib_tables_lock); static DEFINE_RWLOCK(dn_fib_tables_lock);
struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1]; struct dn_fib_table *dn_fib_tables[RT_TABLE_MAX + 1];
static kmem_cache_t *dn_hash_kmem; static kmem_cache_t *dn_hash_kmem __read_mostly;
static int dn_fib_hash_zombies; static int dn_fib_hash_zombies;
static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz) static inline dn_fib_idx_t dn_hash(dn_fib_key_t key, struct dn_zone *dz)
......
...@@ -113,7 +113,7 @@ ...@@ -113,7 +113,7 @@
#include <linux/mroute.h> #include <linux/mroute.h>
#endif #endif
DEFINE_SNMP_STAT(struct linux_mib, net_statistics); DEFINE_SNMP_STAT(struct linux_mib, net_statistics) __read_mostly;
extern void ip_mc_drop_socket(struct sock *sk); extern void ip_mc_drop_socket(struct sock *sk);
......
...@@ -45,8 +45,8 @@ ...@@ -45,8 +45,8 @@
#include "fib_lookup.h" #include "fib_lookup.h"
static kmem_cache_t *fn_hash_kmem; static kmem_cache_t *fn_hash_kmem __read_mostly;
static kmem_cache_t *fn_alias_kmem; static kmem_cache_t *fn_alias_kmem __read_mostly;
struct fib_node { struct fib_node {
struct hlist_node fn_hash; struct hlist_node fn_hash;
......
...@@ -166,7 +166,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn); ...@@ -166,7 +166,7 @@ static struct tnode *halve(struct trie *t, struct tnode *tn);
static void tnode_free(struct tnode *tn); static void tnode_free(struct tnode *tn);
static void trie_dump_seq(struct seq_file *seq, struct trie *t); static void trie_dump_seq(struct seq_file *seq, struct trie *t);
static kmem_cache_t *fn_alias_kmem; static kmem_cache_t *fn_alias_kmem __read_mostly;
static struct trie *trie_local = NULL, *trie_main = NULL; static struct trie *trie_local = NULL, *trie_main = NULL;
......
...@@ -114,7 +114,7 @@ struct icmp_bxm { ...@@ -114,7 +114,7 @@ struct icmp_bxm {
/* /*
* Statistics * Statistics
*/ */
DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics); DEFINE_SNMP_STAT(struct icmp_mib, icmp_statistics) __read_mostly;
/* An array of errno for error messages from dest unreach. */ /* An array of errno for error messages from dest unreach. */
/* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */ /* RFC 1122: 3.2.2.1 States that NET_UNREACH, HOST_UNREACH and SR_FAILED MUST be considered 'transient errs'. */
......
...@@ -73,7 +73,7 @@ ...@@ -73,7 +73,7 @@
/* Exported for inet_getid inline function. */ /* Exported for inet_getid inline function. */
DEFINE_SPINLOCK(inet_peer_idlock); DEFINE_SPINLOCK(inet_peer_idlock);
static kmem_cache_t *peer_cachep; static kmem_cache_t *peer_cachep __read_mostly;
#define node_height(x) x->avl_height #define node_height(x) x->avl_height
static struct inet_peer peer_fake_node = { static struct inet_peer peer_fake_node = {
......
...@@ -150,7 +150,7 @@ ...@@ -150,7 +150,7 @@
* SNMP management statistics * SNMP management statistics
*/ */
DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics); DEFINE_SNMP_STAT(struct ipstats_mib, ip_statistics) __read_mostly;
/* /*
* Process Router Attention IP option * Process Router Attention IP option
......
...@@ -103,7 +103,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock); ...@@ -103,7 +103,7 @@ static DEFINE_SPINLOCK(mfc_unres_lock);
In this case data path is free of exclusive locks at all. In this case data path is free of exclusive locks at all.
*/ */
static kmem_cache_t *mrt_cachep; static kmem_cache_t *mrt_cachep __read_mostly;
static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local); static int ip_mr_forward(struct sk_buff *skb, struct mfc_cache *cache, int local);
static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert); static int ipmr_cache_report(struct sk_buff *pkt, vifi_t vifi, int assert);
......
...@@ -40,7 +40,7 @@ ...@@ -40,7 +40,7 @@
static struct list_head *ip_vs_conn_tab; static struct list_head *ip_vs_conn_tab;
/* SLAB cache for IPVS connections */ /* SLAB cache for IPVS connections */
static kmem_cache_t *ip_vs_conn_cachep; static kmem_cache_t *ip_vs_conn_cachep __read_mostly;
/* counter for current IPVS connections */ /* counter for current IPVS connections */
static atomic_t ip_vs_conn_count = ATOMIC_INIT(0); static atomic_t ip_vs_conn_count = ATOMIC_INIT(0);
......
...@@ -70,8 +70,8 @@ static LIST_HEAD(helpers); ...@@ -70,8 +70,8 @@ static LIST_HEAD(helpers);
unsigned int ip_conntrack_htable_size = 0; unsigned int ip_conntrack_htable_size = 0;
int ip_conntrack_max; int ip_conntrack_max;
struct list_head *ip_conntrack_hash; struct list_head *ip_conntrack_hash;
static kmem_cache_t *ip_conntrack_cachep; static kmem_cache_t *ip_conntrack_cachep __read_mostly;
static kmem_cache_t *ip_conntrack_expect_cachep; static kmem_cache_t *ip_conntrack_expect_cachep __read_mostly;
struct ip_conntrack ip_conntrack_untracked; struct ip_conntrack ip_conntrack_untracked;
unsigned int ip_ct_log_invalid; unsigned int ip_ct_log_invalid;
static LIST_HEAD(unconfirmed); static LIST_HEAD(unconfirmed);
......
...@@ -94,7 +94,7 @@ struct ipt_hashlimit_htable { ...@@ -94,7 +94,7 @@ struct ipt_hashlimit_htable {
static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */ static DEFINE_SPINLOCK(hashlimit_lock); /* protects htables list */
static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */ static DECLARE_MUTEX(hlimit_mutex); /* additional checkentry protection */
static HLIST_HEAD(hashlimit_htables); static HLIST_HEAD(hashlimit_htables);
static kmem_cache_t *hashlimit_cachep; static kmem_cache_t *hashlimit_cachep __read_mostly;
static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b) static inline int dst_cmp(const struct dsthash_ent *ent, struct dsthash_dst *b)
{ {
......
...@@ -269,7 +269,7 @@ ...@@ -269,7 +269,7 @@
int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT; int sysctl_tcp_fin_timeout = TCP_FIN_TIMEOUT;
DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics); DEFINE_SNMP_STAT(struct tcp_mib, tcp_statistics) __read_mostly;
atomic_t tcp_orphan_count = ATOMIC_INIT(0); atomic_t tcp_orphan_count = ATOMIC_INIT(0);
......
...@@ -113,7 +113,7 @@ ...@@ -113,7 +113,7 @@
* Snmp MIB for the UDP layer * Snmp MIB for the UDP layer
*/ */
DEFINE_SNMP_STAT(struct udp_mib, udp_statistics); DEFINE_SNMP_STAT(struct udp_mib, udp_statistics) __read_mostly;
struct hlist_head udp_hash[UDP_HTABLE_SIZE]; struct hlist_head udp_hash[UDP_HTABLE_SIZE];
DEFINE_RWLOCK(udp_hash_lock); DEFINE_RWLOCK(udp_hash_lock);
......
...@@ -67,7 +67,7 @@ ...@@ -67,7 +67,7 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/system.h> #include <asm/system.h>
DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics); DEFINE_SNMP_STAT(struct icmpv6_mib, icmpv6_statistics) __read_mostly;
/* /*
* The ICMP socket(s). This is the most convenient way to flow control * The ICMP socket(s). This is the most convenient way to flow control
......
...@@ -49,7 +49,7 @@ ...@@ -49,7 +49,7 @@
struct rt6_statistics rt6_stats; struct rt6_statistics rt6_stats;
static kmem_cache_t * fib6_node_kmem; static kmem_cache_t * fib6_node_kmem __read_mostly;
enum fib_walk_state_t enum fib_walk_state_t
{ {
......
...@@ -55,7 +55,7 @@ ...@@ -55,7 +55,7 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics); DEFINE_SNMP_STAT(struct ipstats_mib, ipv6_statistics) __read_mostly;
static struct packet_type ipv6_packet_type = { static struct packet_type ipv6_packet_type = {
.type = __constant_htons(ETH_P_IPV6), .type = __constant_htons(ETH_P_IPV6),
......
...@@ -59,7 +59,7 @@ ...@@ -59,7 +59,7 @@
#include <linux/proc_fs.h> #include <linux/proc_fs.h>
#include <linux/seq_file.h> #include <linux/seq_file.h>
DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6); DEFINE_SNMP_STAT(struct udp_mib, udp_stats_in6) __read_mostly;
/* Grrr, addr_type already calculated by caller, but I don't want /* Grrr, addr_type already calculated by caller, but I don't want
* to add some silly "cookie" argument to this method just for that. * to add some silly "cookie" argument to this method just for that.
......
...@@ -79,7 +79,7 @@ static u32 xfrm6_tunnel_spi; ...@@ -79,7 +79,7 @@ static u32 xfrm6_tunnel_spi;
#define XFRM6_TUNNEL_SPI_MIN 1 #define XFRM6_TUNNEL_SPI_MIN 1
#define XFRM6_TUNNEL_SPI_MAX 0xffffffff #define XFRM6_TUNNEL_SPI_MAX 0xffffffff
static kmem_cache_t *xfrm6_tunnel_spi_kmem; static kmem_cache_t *xfrm6_tunnel_spi_kmem __read_mostly;
#define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256 #define XFRM6_TUNNEL_SPI_BYADDR_HSIZE 256
#define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256 #define XFRM6_TUNNEL_SPI_BYSPI_HSIZE 256
......
...@@ -62,7 +62,7 @@ ...@@ -62,7 +62,7 @@
/* Global data structures. */ /* Global data structures. */
struct sctp_globals sctp_globals; struct sctp_globals sctp_globals;
struct proc_dir_entry *proc_net_sctp; struct proc_dir_entry *proc_net_sctp;
DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics); DEFINE_SNMP_STAT(struct sctp_mib, sctp_statistics) __read_mostly;
struct idr sctp_assocs_id; struct idr sctp_assocs_id;
DEFINE_SPINLOCK(sctp_assocs_id_lock); DEFINE_SPINLOCK(sctp_assocs_id_lock);
...@@ -78,8 +78,8 @@ static struct sctp_pf *sctp_pf_inet_specific; ...@@ -78,8 +78,8 @@ static struct sctp_pf *sctp_pf_inet_specific;
static struct sctp_af *sctp_af_v4_specific; static struct sctp_af *sctp_af_v4_specific;
static struct sctp_af *sctp_af_v6_specific; static struct sctp_af *sctp_af_v6_specific;
kmem_cache_t *sctp_chunk_cachep; kmem_cache_t *sctp_chunk_cachep __read_mostly;
kmem_cache_t *sctp_bucket_cachep; kmem_cache_t *sctp_bucket_cachep __read_mostly;
extern int sctp_snmp_proc_init(void); extern int sctp_snmp_proc_init(void);
extern int sctp_snmp_proc_exit(void); extern int sctp_snmp_proc_exit(void);
......
...@@ -274,7 +274,7 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, int __user *ule ...@@ -274,7 +274,7 @@ int move_addr_to_user(void *kaddr, int klen, void __user *uaddr, int __user *ule
#define SOCKFS_MAGIC 0x534F434B #define SOCKFS_MAGIC 0x534F434B
static kmem_cache_t * sock_inode_cachep; static kmem_cache_t * sock_inode_cachep __read_mostly;
static struct inode *sock_alloc_inode(struct super_block *sb) static struct inode *sock_alloc_inode(struct super_block *sb)
{ {
...@@ -333,7 +333,7 @@ static struct super_block *sockfs_get_sb(struct file_system_type *fs_type, ...@@ -333,7 +333,7 @@ static struct super_block *sockfs_get_sb(struct file_system_type *fs_type,
return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC); return get_sb_pseudo(fs_type, "socket:", &sockfs_ops, SOCKFS_MAGIC);
} }
static struct vfsmount *sock_mnt; static struct vfsmount *sock_mnt __read_mostly;
static struct file_system_type sock_fs_type = { static struct file_system_type sock_fs_type = {
.name = "sockfs", .name = "sockfs",
......
...@@ -28,13 +28,13 @@ ...@@ -28,13 +28,13 @@
#include <linux/workqueue.h> #include <linux/workqueue.h>
#include <linux/sunrpc/rpc_pipe_fs.h> #include <linux/sunrpc/rpc_pipe_fs.h>
static struct vfsmount *rpc_mount; static struct vfsmount *rpc_mount __read_mostly;
static int rpc_mount_count; static int rpc_mount_count;
static struct file_system_type rpc_pipe_fs_type; static struct file_system_type rpc_pipe_fs_type;
static kmem_cache_t *rpc_inode_cachep; static kmem_cache_t *rpc_inode_cachep __read_mostly;
#define RPC_UPCALL_TIMEOUT (30*HZ) #define RPC_UPCALL_TIMEOUT (30*HZ)
......
...@@ -34,10 +34,10 @@ static int rpc_task_id; ...@@ -34,10 +34,10 @@ static int rpc_task_id;
#define RPC_BUFFER_MAXSIZE (2048) #define RPC_BUFFER_MAXSIZE (2048)
#define RPC_BUFFER_POOLSIZE (8) #define RPC_BUFFER_POOLSIZE (8)
#define RPC_TASK_POOLSIZE (8) #define RPC_TASK_POOLSIZE (8)
static kmem_cache_t *rpc_task_slabp; static kmem_cache_t *rpc_task_slabp __read_mostly;
static kmem_cache_t *rpc_buffer_slabp; static kmem_cache_t *rpc_buffer_slabp __read_mostly;
static mempool_t *rpc_task_mempool; static mempool_t *rpc_task_mempool __read_mostly;
static mempool_t *rpc_buffer_mempool; static mempool_t *rpc_buffer_mempool __read_mostly;
static void __rpc_default_timer(struct rpc_task *task); static void __rpc_default_timer(struct rpc_task *task);
static void rpciod_killall(void); static void rpciod_killall(void);
......
...@@ -12,7 +12,7 @@ ...@@ -12,7 +12,7 @@
#include <net/ip.h> #include <net/ip.h>
#include <net/xfrm.h> #include <net/xfrm.h>
static kmem_cache_t *secpath_cachep; static kmem_cache_t *secpath_cachep __read_mostly;
void __secpath_destroy(struct sec_path *sp) void __secpath_destroy(struct sec_path *sp)
{ {
......
...@@ -37,7 +37,7 @@ EXPORT_SYMBOL(xfrm_policy_list); ...@@ -37,7 +37,7 @@ EXPORT_SYMBOL(xfrm_policy_list);
static DEFINE_RWLOCK(xfrm_policy_afinfo_lock); static DEFINE_RWLOCK(xfrm_policy_afinfo_lock);
static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO]; static struct xfrm_policy_afinfo *xfrm_policy_afinfo[NPROTO];
static kmem_cache_t *xfrm_dst_cache; static kmem_cache_t *xfrm_dst_cache __read_mostly;
static struct work_struct xfrm_policy_gc_work; static struct work_struct xfrm_policy_gc_work;
static struct list_head xfrm_policy_gc_list = static struct list_head xfrm_policy_gc_list =
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment