Commit 4f2294b6 authored by Catalin Marinas

kmemleak: Add modules support

This patch handles the kmemleak operations needed for module loading so
that memory allocations made from inside a module are properly tracked.
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent dbb1f81c
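For background (a sketch, not part of this commit): the kmemleak calls used below come in pairs. An allocation that kmemleak cannot observe on its own is announced with kmemleak_alloc() so its memory gets scanned for pointers, and the matching kmemleak_free() removes it again before the memory is reused. A minimal illustration of that pairing, mirroring the percpu_modalloc()/percpu_modfree() hunks below; my_backend_alloc() and my_backend_free() are hypothetical placeholders for whatever allocator actually provides the memory:

#include <linux/kmemleak.h>
#include <linux/types.h>
#include <linux/gfp.h>

static void *tracked_alloc(size_t size)
{
	void *ptr = my_backend_alloc(size);	/* hypothetical backend allocator */

	if (ptr)
		/*
		 * min_count 0: never report this block itself as a leak,
		 * but still scan it for pointers to other objects.
		 */
		kmemleak_alloc(ptr, size, 0, GFP_KERNEL);
	return ptr;
}

static void tracked_free(void *ptr)
{
	/* tell kmemleak before the memory is handed back and reused */
	kmemleak_free(ptr);
	my_backend_free(ptr);			/* hypothetical backend allocator */
}

In the patch itself the same pairing is applied once per possible CPU, since each CPU's copy of a per-cpu block lives at ptr + per_cpu_offset(cpu).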
@@ -53,6 +53,7 @@
 #include <linux/ftrace.h>
 #include <linux/async.h>
 #include <linux/percpu.h>
+#include <linux/kmemleak.h>
 
 #if 0
 #define DEBUGP printk
@@ -430,6 +431,7 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
 	unsigned long extra;
 	unsigned int i;
 	void *ptr;
+	int cpu;
 
 	if (align > PAGE_SIZE) {
 		printk(KERN_WARNING "%s: per-cpu alignment %li > %li\n",
@@ -459,6 +461,11 @@ static void *percpu_modalloc(unsigned long size, unsigned long align,
 			if (!split_block(i, size))
 				return NULL;
 
+			/* add the per-cpu scanning areas */
+			for_each_possible_cpu(cpu)
+				kmemleak_alloc(ptr + per_cpu_offset(cpu), size, 0,
+					       GFP_KERNEL);
+
 			/* Mark allocated */
 			pcpu_size[i] = -pcpu_size[i];
 			return ptr;
@@ -473,6 +480,7 @@ static void percpu_modfree(void *freeme)
 {
 	unsigned int i;
 	void *ptr = __per_cpu_start + block_size(pcpu_size[0]);
+	int cpu;
 
 	/* First entry is core kernel percpu data. */
 	for (i = 1; i < pcpu_num_used; ptr += block_size(pcpu_size[i]), i++) {
@@ -484,6 +492,10 @@ static void percpu_modfree(void *freeme)
 	BUG();
 
  free:
+	/* remove the per-cpu scanning areas */
+	for_each_possible_cpu(cpu)
+		kmemleak_free(freeme + per_cpu_offset(cpu));
+
 	/* Merge with previous? */
 	if (pcpu_size[i-1] >= 0) {
 		pcpu_size[i-1] += pcpu_size[i];
@@ -1876,6 +1888,36 @@ static void *module_alloc_update_bounds(unsigned long size)
 	return ret;
 }
 
+#ifdef CONFIG_DEBUG_KMEMLEAK
+static void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
+				 Elf_Shdr *sechdrs, char *secstrings)
+{
+	unsigned int i;
+
+	/* only scan the sections containing data */
+	kmemleak_scan_area(mod->module_core, (unsigned long)mod -
+			   (unsigned long)mod->module_core,
+			   sizeof(struct module), GFP_KERNEL);
+
+	for (i = 1; i < hdr->e_shnum; i++) {
+		if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+			continue;
+		if (strncmp(secstrings + sechdrs[i].sh_name, ".data", 5) != 0
+		    && strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) != 0)
+			continue;
+
+		kmemleak_scan_area(mod->module_core, sechdrs[i].sh_addr -
+				   (unsigned long)mod->module_core,
+				   sechdrs[i].sh_size, GFP_KERNEL);
+	}
+}
+#else
+static inline void kmemleak_load_module(struct module *mod, Elf_Ehdr *hdr,
+					Elf_Shdr *sechdrs, char *secstrings)
+{
+}
+#endif
+
 /* Allocate and load the module: note that size of section 0 is always
    zero, and we rely on this for optional sections. */
 static noinline struct module *load_module(void __user *umod,
@@ -2046,6 +2088,12 @@ static noinline struct module *load_module(void __user *umod,
 
 	/* Do the allocs. */
 	ptr = module_alloc_update_bounds(mod->core_size);
+	/*
+	 * The pointer to this block is stored in the module structure
+	 * which is inside the block. Just mark it as not being a
+	 * leak.
+	 */
+	kmemleak_not_leak(ptr);
 	if (!ptr) {
 		err = -ENOMEM;
 		goto free_percpu;
@@ -2054,6 +2102,13 @@ static noinline struct module *load_module(void __user *umod,
 	mod->module_core = ptr;
 
 	ptr = module_alloc_update_bounds(mod->init_size);
+	/*
+	 * The pointer to this block is stored in the module structure
+	 * which is inside the block. This block doesn't need to be
+	 * scanned as it contains data and code that will be freed
+	 * after the module is initialized.
+	 */
+	kmemleak_ignore(ptr);
 	if (!ptr && mod->init_size) {
 		err = -ENOMEM;
 		goto free_core;
@@ -2084,6 +2139,7 @@ static noinline struct module *load_module(void __user *umod,
 	}
 	/* Module has been moved. */
 	mod = (void *)sechdrs[modindex].sh_addr;
+	kmemleak_load_module(mod, hdr, sechdrs, secstrings);
 
 #if defined(CONFIG_MODULE_UNLOAD) && defined(CONFIG_SMP)
 	mod->refptr = percpu_modalloc(sizeof(local_t), __alignof__(local_t),
......