Commit e1783a24 authored by Christoph Lameter, committed by Tejun Heo

module: Use this_cpu_xx to dynamically allocate counters

Use cpu ops to deal with the per cpu data instead of a local_t. Reduces memory
requirements, cache footprint and decreases cycle counts.

The this_cpu_xx operations are also used for !SMP mode. Otherwise we could
not drop the use of __module_ref_addr() which would make per cpu data handling
complicated. this_cpu_xx operations have their own fallback for !SMP.

V8-V9:
- Leave include asm/module.h since ringbuffer.c depends on it. Nothing else
  does though. Another patch will deal with that.
- Remove spurious free.
Signed-off-by: Christoph Lameter <cl@linux-foundation.org>
Acked-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Tejun Heo <tj@kernel.org>
parent 38b7827f
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/tracepoint.h> #include <linux/tracepoint.h>
#include <asm/local.h> #include <asm/local.h>
#include <linux/percpu.h>
#include <asm/module.h> #include <asm/module.h>
#include <trace/events/module.h> #include <trace/events/module.h>
...@@ -363,11 +364,9 @@ struct module ...@@ -363,11 +364,9 @@ struct module
/* Destruction function. */ /* Destruction function. */
void (*exit)(void); void (*exit)(void);
#ifdef CONFIG_SMP struct module_ref {
char *refptr; int count;
#else } *refptr;
local_t ref;
#endif
#endif #endif
#ifdef CONFIG_CONSTRUCTORS #ifdef CONFIG_CONSTRUCTORS
...@@ -454,25 +453,16 @@ void __symbol_put(const char *symbol); ...@@ -454,25 +453,16 @@ void __symbol_put(const char *symbol);
#define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x) #define symbol_put(x) __symbol_put(MODULE_SYMBOL_PREFIX #x)
void symbol_put_addr(void *addr); void symbol_put_addr(void *addr);
static inline local_t *__module_ref_addr(struct module *mod, int cpu)
{
#ifdef CONFIG_SMP
return (local_t *) (mod->refptr + per_cpu_offset(cpu));
#else
return &mod->ref;
#endif
}
/* Sometimes we know we already have a refcount, and it's easier not /* Sometimes we know we already have a refcount, and it's easier not
to handle the error case (which only happens with rmmod --wait). */ to handle the error case (which only happens with rmmod --wait). */
static inline void __module_get(struct module *module) static inline void __module_get(struct module *module)
{ {
if (module) { if (module) {
unsigned int cpu = get_cpu(); preempt_disable();
local_inc(__module_ref_addr(module, cpu)); __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_, trace_module_get(module, _THIS_IP_,
local_read(__module_ref_addr(module, cpu))); __this_cpu_read(module->refptr->count));
put_cpu(); preempt_enable();
} }
} }
...@@ -481,15 +471,17 @@ static inline int try_module_get(struct module *module) ...@@ -481,15 +471,17 @@ static inline int try_module_get(struct module *module)
int ret = 1; int ret = 1;
if (module) { if (module) {
unsigned int cpu = get_cpu(); preempt_disable();
if (likely(module_is_live(module))) { if (likely(module_is_live(module))) {
local_inc(__module_ref_addr(module, cpu)); __this_cpu_inc(module->refptr->count);
trace_module_get(module, _THIS_IP_, trace_module_get(module, _THIS_IP_,
local_read(__module_ref_addr(module, cpu))); __this_cpu_read(module->refptr->count));
} }
else else
ret = 0; ret = 0;
put_cpu();
preempt_enable();
} }
return ret; return ret;
} }
......
...@@ -474,9 +474,10 @@ static void module_unload_init(struct module *mod) ...@@ -474,9 +474,10 @@ static void module_unload_init(struct module *mod)
INIT_LIST_HEAD(&mod->modules_which_use_me); INIT_LIST_HEAD(&mod->modules_which_use_me);
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
local_set(__module_ref_addr(mod, cpu), 0); per_cpu_ptr(mod->refptr, cpu)->count = 0;
/* Hold reference count during initialization. */ /* Hold reference count during initialization. */
local_set(__module_ref_addr(mod, raw_smp_processor_id()), 1); __this_cpu_write(mod->refptr->count, 1);
/* Backwards compatibility macros put refcount during init. */ /* Backwards compatibility macros put refcount during init. */
mod->waiter = current; mod->waiter = current;
} }
...@@ -619,7 +620,7 @@ unsigned int module_refcount(struct module *mod) ...@@ -619,7 +620,7 @@ unsigned int module_refcount(struct module *mod)
int cpu; int cpu;
for_each_possible_cpu(cpu) for_each_possible_cpu(cpu)
total += local_read(__module_ref_addr(mod, cpu)); total += per_cpu_ptr(mod->refptr, cpu)->count;
return total; return total;
} }
EXPORT_SYMBOL(module_refcount); EXPORT_SYMBOL(module_refcount);
...@@ -796,14 +797,15 @@ static struct module_attribute refcnt = { ...@@ -796,14 +797,15 @@ static struct module_attribute refcnt = {
void module_put(struct module *module) void module_put(struct module *module)
{ {
if (module) { if (module) {
unsigned int cpu = get_cpu(); preempt_disable();
local_dec(__module_ref_addr(module, cpu)); __this_cpu_dec(module->refptr->count);
trace_module_put(module, _RET_IP_, trace_module_put(module, _RET_IP_,
local_read(__module_ref_addr(module, cpu))); __this_cpu_read(module->refptr->count));
/* Maybe they're waiting for us to drop reference? */ /* Maybe they're waiting for us to drop reference? */
if (unlikely(!module_is_live(module))) if (unlikely(!module_is_live(module)))
wake_up_process(module->waiter); wake_up_process(module->waiter);
put_cpu(); preempt_enable();
} }
} }
EXPORT_SYMBOL(module_put); EXPORT_SYMBOL(module_put);
...@@ -1394,9 +1396,9 @@ static void free_module(struct module *mod) ...@@ -1394,9 +1396,9 @@ static void free_module(struct module *mod)
kfree(mod->args); kfree(mod->args);
if (mod->percpu) if (mod->percpu)
percpu_modfree(mod->percpu); percpu_modfree(mod->percpu);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) #if defined(CONFIG_MODULE_UNLOAD)
if (mod->refptr) if (mod->refptr)
percpu_modfree(mod->refptr); free_percpu(mod->refptr);
#endif #endif
/* Free lock-classes: */ /* Free lock-classes: */
lockdep_free_key_range(mod->module_core, mod->core_size); lockdep_free_key_range(mod->module_core, mod->core_size);
...@@ -2159,9 +2161,8 @@ static noinline struct module *load_module(void __user *umod, ...@@ -2159,9 +2161,8 @@ static noinline struct module *load_module(void __user *umod,
mod = (void *)sechdrs[modindex].sh_addr; mod = (void *)sechdrs[modindex].sh_addr;
kmemleak_load_module(mod, hdr, sechdrs, secstrings); kmemleak_load_module(mod, hdr, sechdrs, secstrings);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) #if defined(CONFIG_MODULE_UNLOAD)
mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t), mod->refptr = alloc_percpu(struct module_ref);
mod->name);
if (!mod->refptr) { if (!mod->refptr) {
err = -ENOMEM; err = -ENOMEM;
goto free_init; goto free_init;
...@@ -2393,8 +2394,8 @@ static noinline struct module *load_module(void __user *umod, ...@@ -2393,8 +2394,8 @@ static noinline struct module *load_module(void __user *umod,
kobject_put(&mod->mkobj.kobj); kobject_put(&mod->mkobj.kobj);
free_unload: free_unload:
module_unload_free(mod); module_unload_free(mod);
#if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP) #if defined(CONFIG_MODULE_UNLOAD)
percpu_modfree(mod->refptr); free_percpu(mod->refptr);
free_init: free_init:
#endif #endif
module_free(mod, mod->module_init); module_free(mod, mod->module_init);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment