Commit bf19f75e authored by Andrew Morton's avatar Andrew Morton Committed by Linus Torvalds

[PATCH] slab: Rework the slab timer code to use add_timer_on

Manfred had all this weird code to schedule a kernel thread onto a
different CPU just so that we could bond a timer to that CPU.

Convert it all to use the new add_timer_on().
parent fd1425d5
...@@ -473,7 +473,7 @@ enum { ...@@ -473,7 +473,7 @@ enum {
FULL FULL
} g_cpucache_up; } g_cpucache_up;
static struct timer_list reap_timer[NR_CPUS]; static struct timer_list reap_timers[NR_CPUS];
static void reap_timer_fnc(unsigned long data); static void reap_timer_fnc(unsigned long data);
...@@ -507,58 +507,24 @@ static void cache_estimate (unsigned long gfporder, size_t size, ...@@ -507,58 +507,24 @@ static void cache_estimate (unsigned long gfporder, size_t size,
*left_over = wastage; *left_over = wastage;
} }
static void do_timerstart(void *arg) /*
* Start the reap timer running on the target CPU. We run at around 1 to 2Hz.
* Add the CPU number into the expiry time to minimize the possibility of the
* CPUs getting into lockstep and contending for the global cache chain lock.
*/
static void start_cpu_timer(int cpu)
{ {
int cpu = smp_processor_id(); struct timer_list *rt = &reap_timers[cpu];
if (reap_timer[cpu].function == 0) {
printk(KERN_INFO "slab: reap timer started for cpu %d.\n", cpu); if (rt->function == NULL) {
init_timer(&reap_timer[cpu]); printk(KERN_INFO "slab: reap timer started for cpu %d\n", cpu);
reap_timer[cpu].expires = jiffies + HZ + 3*cpu; init_timer(rt);
reap_timer[cpu].function = reap_timer_fnc; rt->expires = jiffies + HZ + 3*cpu;
add_timer(&reap_timer[cpu]); rt->function = reap_timer_fnc;
add_timer_on(rt, cpu);
} }
} }
/* This doesn't belong here, should be somewhere in kernel/ */
struct cpucall_info {
int cpu;
void (*fnc)(void*arg);
void *arg;
};
static int cpucall_thread(void *__info)
{
struct cpucall_info *info = (struct cpucall_info *)__info;
/* Migrate to the right CPU */
daemonize();
set_cpus_allowed(current, 1UL << info->cpu);
BUG_ON(smp_processor_id() != info->cpu);
info->fnc(info->arg);
kfree(info);
return 0;
}
static int do_cpucall(void (*fnc)(void *arg), void *arg, int cpu)
{
struct cpucall_info *info;
info = kmalloc(sizeof(*info), GFP_KERNEL);
if (!info) {
printk(KERN_INFO "do_cpucall for cpu %d, callback %p failed at kmalloc.\n",
cpu, fnc);
return -1;
}
info->cpu = cpu;
info->fnc = fnc;
info->arg = arg;
if (kernel_thread(cpucall_thread, info, CLONE_KERNEL) < 0) {
printk(KERN_INFO "do_cpucall for cpu %d, callback %p failed at kernel_thread.\n",
cpu, fnc);
return -1;
}
return 0;
}
/* /*
* Note: if someone calls kmem_cache_alloc() on the new * Note: if someone calls kmem_cache_alloc() on the new
* cpu before the cpuup callback had a chance to allocate * cpu before the cpuup callback had a chance to allocate
...@@ -598,7 +564,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb, ...@@ -598,7 +564,7 @@ static int __devinit cpuup_callback(struct notifier_block *nfb,
} }
if (g_cpucache_up == FULL) if (g_cpucache_up == FULL)
do_cpucall(do_timerstart, NULL, cpu); start_cpu_timer(cpu);
up(&cache_chain_sem); up(&cache_chain_sem);
} }
...@@ -699,7 +665,7 @@ void __init kmem_cache_sizes_init(void) ...@@ -699,7 +665,7 @@ void __init kmem_cache_sizes_init(void)
int __init cpucache_init(void) int __init cpucache_init(void)
{ {
kmem_cache_t *cachep; kmem_cache_t *cachep;
int i; int cpu;
down(&cache_chain_sem); down(&cache_chain_sem);
g_cpucache_up = FULL; g_cpucache_up = FULL;
...@@ -712,9 +678,9 @@ int __init cpucache_init(void) ...@@ -712,9 +678,9 @@ int __init cpucache_init(void)
* pages to gfp. * pages to gfp.
*/ */
for (i=0;i<NR_CPUS;i++) { for (cpu = 0; cpu < NR_CPUS; cpu++) {
if (cpu_online(i)) if (cpu_online(cpu))
do_cpucall(do_timerstart, NULL, i); start_cpu_timer(cpu);
} }
up(&cache_chain_sem); up(&cache_chain_sem);
...@@ -1991,8 +1957,8 @@ static void enable_cpucache (kmem_cache_t *cachep) ...@@ -1991,8 +1957,8 @@ static void enable_cpucache (kmem_cache_t *cachep)
* *
* Called from a timer, every few seconds * Called from a timer, every few seconds
 * Purpose: * Purpose:
* - clear the per-cpu caches * - clear the per-cpu caches for this CPU.
* - return freeable pages to gfp. * - return freeable pages to the main free memory pool.
*/ */
static inline void cache_reap (void) static inline void cache_reap (void)
{ {
...@@ -2076,12 +2042,18 @@ static inline void cache_reap (void) ...@@ -2076,12 +2042,18 @@ static inline void cache_reap (void)
read_unlock(&cache_chain_lock); read_unlock(&cache_chain_lock);
} }
/*
 * This is a timer handler. There is one per CPU. It is called periodically
* to shrink this CPU's caches. Otherwise there could be memory tied up
 * for long periods (or forever) due to load changes.
*/
static void reap_timer_fnc(unsigned long data) static void reap_timer_fnc(unsigned long data)
{ {
cache_reap(); int cpu = smp_processor_id();
struct timer_list *rt = &reap_timers[cpu];
reap_timer[smp_processor_id()].expires = jiffies + REAPTIMEOUT_CPUC; cache_reap();
add_timer(&reap_timer[smp_processor_id()]); mod_timer(rt, jiffies + REAPTIMEOUT_CPUC + cpu);
} }
#ifdef CONFIG_PROC_FS #ifdef CONFIG_PROC_FS
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment