Commit 89c03538 authored by Robert Olsson's avatar Robert Olsson Committed by David S. Miller

[NET]: Remove skb_head_pool.

parent a814cf52
...@@ -20,6 +20,7 @@ ...@@ -20,6 +20,7 @@
* Ray VanTassle : Fixed --skb->lock in free * Ray VanTassle : Fixed --skb->lock in free
* Alan Cox : skb_copy copy arp field * Alan Cox : skb_copy copy arp field
* Andi Kleen : slabified it. * Andi Kleen : slabified it.
* Robert Olsson : Removed skb_head_pool
* *
* NOTE: * NOTE:
* The __skb_ routines should be called with interrupts * The __skb_ routines should be called with interrupts
...@@ -63,15 +64,8 @@ ...@@ -63,15 +64,8 @@
#include <asm/uaccess.h> #include <asm/uaccess.h>
#include <asm/system.h> #include <asm/system.h>
int sysctl_hot_list_len = 128;
static kmem_cache_t *skbuff_head_cache; static kmem_cache_t *skbuff_head_cache;
static union {
struct sk_buff_head list;
char pad[SMP_CACHE_BYTES];
} skb_head_pool[NR_CPUS];
/* /*
* Keep out-of-line to prevent kernel bloat. * Keep out-of-line to prevent kernel bloat.
* __builtin_return_address is not used because it is not always * __builtin_return_address is not used because it is not always
...@@ -109,44 +103,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here) ...@@ -109,44 +103,6 @@ void skb_under_panic(struct sk_buff *skb, int sz, void *here)
BUG(); BUG();
} }
/*
 * Try to grab a recycled sk_buff head from the current CPU's hot list.
 * Returns NULL if the per-CPU pool is empty, in which case the caller
 * falls back to kmem_cache_alloc() on skbuff_head_cache.
 */
static __inline__ struct sk_buff *skb_head_from_pool(void)
{
struct sk_buff_head *list;
struct sk_buff *skb = NULL;
unsigned long flags;
/* Disable local interrupts: the pool is per-CPU and may also be
 * touched from IRQ context, so plain preemption-off is not enough. */
local_irq_save(flags);
list = &skb_head_pool[smp_processor_id()].list;
if (skb_queue_len(list))
skb = __skb_dequeue(list);
local_irq_restore(flags);
/* NULL when the pool was empty; otherwise a reusable skb head. */
return skb;
}
/*
 * Return a freed sk_buff head to the current CPU's hot list for reuse,
 * or release it back to the slab cache if the list is already at the
 * sysctl_hot_list_len limit.
 */
static __inline__ void skb_head_to_pool(struct sk_buff *skb)
{
struct sk_buff_head *list;
unsigned long flags;
/* IRQ-safe access to the per-CPU pool, same as skb_head_from_pool(). */
local_irq_save(flags);
list = &skb_head_pool[smp_processor_id()].list;
if (skb_queue_len(list) < sysctl_hot_list_len) {
__skb_queue_head(list, skb);
local_irq_restore(flags);
return;
}
local_irq_restore(flags);
/* Pool full: give the head back to the skbuff_head_cache slab. */
kmem_cache_free(skbuff_head_cache, skb);
}
/* Allocate a new skbuff. We do this ourselves so we can fill in a few /* Allocate a new skbuff. We do this ourselves so we can fill in a few
* 'private' fields and also do memory statistics to find all the * 'private' fields and also do memory statistics to find all the
* [BEEP] leaks. * [BEEP] leaks.
...@@ -174,13 +130,10 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask) ...@@ -174,13 +130,10 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
might_sleep(); might_sleep();
/* Get the HEAD */ /* Get the HEAD */
skb = skb_head_from_pool(); skb = kmem_cache_alloc(skbuff_head_cache,
if (!skb) { gfp_mask & ~__GFP_DMA);
skb = kmem_cache_alloc(skbuff_head_cache, if (!skb)
gfp_mask & ~__GFP_DMA); goto out;
if (!skb)
goto out;
}
/* Get the DATA. Size must match skb_add_mtu(). */ /* Get the DATA. Size must match skb_add_mtu(). */
size = SKB_DATA_ALIGN(size); size = SKB_DATA_ALIGN(size);
...@@ -204,7 +157,7 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask) ...@@ -204,7 +157,7 @@ struct sk_buff *alloc_skb(unsigned int size, int gfp_mask)
out: out:
return skb; return skb;
nodata: nodata:
skb_head_to_pool(skb); kmem_cache_free(skbuff_head_cache, skb);
skb = NULL; skb = NULL;
goto out; goto out;
} }
...@@ -254,7 +207,7 @@ static void skb_release_data(struct sk_buff *skb) ...@@ -254,7 +207,7 @@ static void skb_release_data(struct sk_buff *skb)
void kfree_skbmem(struct sk_buff *skb) void kfree_skbmem(struct sk_buff *skb)
{ {
skb_release_data(skb); skb_release_data(skb);
skb_head_to_pool(skb); kmem_cache_free(skbuff_head_cache, skb);
} }
/** /**
...@@ -309,13 +262,10 @@ void __kfree_skb(struct sk_buff *skb) ...@@ -309,13 +262,10 @@ void __kfree_skb(struct sk_buff *skb)
struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask) struct sk_buff *skb_clone(struct sk_buff *skb, int gfp_mask)
{ {
struct sk_buff *n = skb_head_from_pool(); struct sk_buff *n = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
if (!n) { if (!n)
n = kmem_cache_alloc(skbuff_head_cache, gfp_mask); return NULL;
if (!n)
return NULL;
}
#define C(x) n->x = skb->x #define C(x) n->x = skb->x
...@@ -1204,8 +1154,6 @@ void skb_add_mtu(int mtu) ...@@ -1204,8 +1154,6 @@ void skb_add_mtu(int mtu)
void __init skb_init(void) void __init skb_init(void)
{ {
int i;
skbuff_head_cache = kmem_cache_create("skbuff_head_cache", skbuff_head_cache = kmem_cache_create("skbuff_head_cache",
sizeof(struct sk_buff), sizeof(struct sk_buff),
0, 0,
...@@ -1213,7 +1161,4 @@ void __init skb_init(void) ...@@ -1213,7 +1161,4 @@ void __init skb_init(void)
NULL, NULL); NULL, NULL);
if (!skbuff_head_cache) if (!skbuff_head_cache)
panic("cannot create skbuff cache"); panic("cannot create skbuff cache");
for (i = 0; i < NR_CPUS; i++)
skb_queue_head_init(&skb_head_pool[i].list);
} }
...@@ -28,7 +28,6 @@ extern __u32 sysctl_rmem_default; ...@@ -28,7 +28,6 @@ extern __u32 sysctl_rmem_default;
extern int sysctl_core_destroy_delay; extern int sysctl_core_destroy_delay;
extern int sysctl_optmem_max; extern int sysctl_optmem_max;
extern int sysctl_hot_list_len;
#ifdef CONFIG_NET_DIVERT #ifdef CONFIG_NET_DIVERT
extern char sysctl_divert_version[]; extern char sysctl_divert_version[];
...@@ -150,14 +149,6 @@ ctl_table core_table[] = { ...@@ -150,14 +149,6 @@ ctl_table core_table[] = {
.mode = 0644, .mode = 0644,
.proc_handler = &proc_dointvec .proc_handler = &proc_dointvec
}, },
{
.ctl_name = NET_CORE_HOT_LIST_LENGTH,
.procname = "hot_list_length",
.data = &sysctl_hot_list_len,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec
},
#ifdef CONFIG_NET_DIVERT #ifdef CONFIG_NET_DIVERT
{ {
.ctl_name = NET_CORE_DIVERT_VERSION, .ctl_name = NET_CORE_DIVERT_VERSION,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment