Commit dfec072e authored by Vegard Nossum

kmemcheck: add the kmemcheck core

General description: kmemcheck is a patch to the Linux kernel that
detects use of uninitialized memory. It does this by trapping every
read and write to memory that was allocated dynamically (e.g. using
kmalloc()). If a memory address is read that has not previously been
written to, a message is printed to the kernel log.
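A minimal sketch of the kind of access kmemcheck catches (illustrative only; struct foo and kmemcheck_demo are made-up names, and this assumes a kernel built with CONFIG_KMEMCHECK):

#include <linux/slab.h>

struct foo {
	int a;
	int b;
};

static void kmemcheck_demo(void)
{
	struct foo *f = kmalloc(sizeof(*f), GFP_KERNEL);

	if (!f)
		return;

	f->a = 1;		/* written first: no report */
	if (f->b == 0)		/* f->b was never written: kmemcheck logs
				 * an uninitialized-read warning here */
		;
	kfree(f);
}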

Thanks to Andi Kleen for the set_memory_4k() solution.

Andrew Morton suggested documenting the shadow member of struct page.
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
Signed-off-by: Pekka Enberg <penberg@cs.helsinki.fi>

[export kmemcheck_mark_initialized]
[build fix for setup_max_cpus]
Signed-off-by: Ingo Molnar <mingo@elte.hu>

[rebased for mainline inclusion]
Signed-off-by: Vegard Nossum <vegardno@ifi.uio.no>
parent e594c8de
arch/x86/Makefile
@@ -81,6 +81,11 @@ ifdef CONFIG_CC_STACKPROTECTOR
endif
endif
# Don't unroll struct assignments with kmemcheck enabled
ifeq ($(CONFIG_KMEMCHECK),y)
KBUILD_CFLAGS += $(call cc-option,-fno-builtin-memcpy)
endif
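As an aside on the struct-assignment case the comment above refers to: without this flag, GCC may expand a plain struct copy into word-sized moves that can read uninitialized padding bytes and trip kmemcheck's per-access checks. A hypothetical C illustration (not part of the patch):

struct s {
	char c;		/* padding typically follows on most ABIs */
	int x;
};

void copy_s(struct s *dst, const struct s *src)
{
	/*
	 * With the builtin, GCC may emit two 4-byte moves here and
	 * read src's padding even though it was never written;
	 * -fno-builtin-memcpy keeps this a real memcpy() call.
	 */
	*dst = *src;
}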
# The stack pointer register is named differently on 32-bit and 64-bit x86
sp-$(CONFIG_X86_32) := esp
sp-$(CONFIG_X86_64) := rsp
arch/x86/include/asm/kmemcheck.h
#ifndef ASM_X86_KMEMCHECK_H
#define ASM_X86_KMEMCHECK_H
#include <linux/types.h>
#include <asm/ptrace.h>
#ifdef CONFIG_KMEMCHECK
bool kmemcheck_active(struct pt_regs *regs);
void kmemcheck_show(struct pt_regs *regs);
void kmemcheck_hide(struct pt_regs *regs);
bool kmemcheck_fault(struct pt_regs *regs,
unsigned long address, unsigned long error_code);
bool kmemcheck_trap(struct pt_regs *regs);
#else
static inline bool kmemcheck_active(struct pt_regs *regs)
{
return false;
}
static inline void kmemcheck_show(struct pt_regs *regs)
{
}
static inline void kmemcheck_hide(struct pt_regs *regs)
{
}
static inline bool kmemcheck_fault(struct pt_regs *regs,
unsigned long address, unsigned long error_code)
{
return false;
}
static inline bool kmemcheck_trap(struct pt_regs *regs)
{
return false;
}
#endif /* CONFIG_KMEMCHECK */
#endif
arch/x86/include/asm/pgtable.h
@@ -317,6 +317,15 @@ static inline int pte_present(pte_t a)
return pte_flags(a) & (_PAGE_PRESENT | _PAGE_PROTNONE);
}
static inline int pte_hidden(pte_t x)
{
#ifdef CONFIG_KMEMCHECK
return pte_flags(x) & _PAGE_HIDDEN;
#else
return 0;
#endif
}
static inline int pmd_present(pmd_t pmd)
{
return pmd_flags(pmd) & _PAGE_PRESENT;
arch/x86/include/asm/pgtable_types.h
@@ -18,7 +18,7 @@
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_UNUSED1 9 /* available for programmer */
#define _PAGE_BIT_IOMAP 10 /* flag used to indicate IO mapping */
-#define _PAGE_BIT_UNUSED3 11
+#define _PAGE_BIT_HIDDEN 11 /* hidden by kmemcheck */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SPECIAL _PAGE_BIT_UNUSED1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_UNUSED1
......@@ -41,7 +41,7 @@
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_UNUSED1 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED1)
#define _PAGE_IOMAP (_AT(pteval_t, 1) << _PAGE_BIT_IOMAP)
-#define _PAGE_UNUSED3 (_AT(pteval_t, 1) << _PAGE_BIT_UNUSED3)
+#define _PAGE_HIDDEN (_AT(pteval_t, 1) << _PAGE_BIT_HIDDEN)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
arch/x86/mm/Makefile
@@ -10,6 +10,8 @@ obj-$(CONFIG_X86_PTDUMP) += dump_pagetables.o
obj-$(CONFIG_HIGHMEM) += highmem_32.o
obj-$(CONFIG_KMEMCHECK) += kmemcheck/
obj-$(CONFIG_MMIOTRACE) += mmiotrace.o
mmiotrace-y := kmmio.o pf_in.o mmio-mod.o
obj-$(CONFIG_MMIOTRACE_TEST) += testmmiotrace.o
arch/x86/mm/kmemcheck/Makefile
obj-y := error.o kmemcheck.o opcode.o pte.o shadow.o
arch/x86/mm/kmemcheck/error.c
#include <linux/interrupt.h>
#include <linux/kdebug.h>
#include <linux/kmemcheck.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include "error.h"
#include "shadow.h"
enum kmemcheck_error_type {
KMEMCHECK_ERROR_INVALID_ACCESS,
KMEMCHECK_ERROR_BUG,
};
#define SHADOW_COPY_SIZE (1 << CONFIG_KMEMCHECK_SHADOW_COPY_SHIFT)
struct kmemcheck_error {
enum kmemcheck_error_type type;
union {
/* KMEMCHECK_ERROR_INVALID_ACCESS */
struct {
/* Kind of access that caused the error */
enum kmemcheck_shadow state;
/* Address and size of the erroneous read */
unsigned long address;
unsigned int size;
};
};
struct pt_regs regs;
struct stack_trace trace;
unsigned long trace_entries[32];
/* We compress each shadow state to one char. */
unsigned char shadow_copy[SHADOW_COPY_SIZE];
unsigned char memory_copy[SHADOW_COPY_SIZE];
};
/*
* Create a ring queue of errors to output. We can't call printk() directly
* from the kmemcheck traps, since this may call the console drivers and
* result in a recursive fault.
*/
static struct kmemcheck_error error_fifo[CONFIG_KMEMCHECK_QUEUE_SIZE];
static unsigned int error_count;
static unsigned int error_rd;
static unsigned int error_wr;
static unsigned int error_missed_count;
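/*
 * error_rd and error_wr are the read/write cursors into error_fifo;
 * error_count is the number of queued reports, and error_missed_count
 * counts reports dropped because the queue was full.
 */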
static struct kmemcheck_error *error_next_wr(void)
{
struct kmemcheck_error *e;
if (error_count == ARRAY_SIZE(error_fifo)) {
++error_missed_count;
return NULL;
}
e = &error_fifo[error_wr];
if (++error_wr == ARRAY_SIZE(error_fifo))
error_wr = 0;
++error_count;
return e;
}
static struct kmemcheck_error *error_next_rd(void)
{
struct kmemcheck_error *e;
if (error_count == 0)
return NULL;
e = &error_fifo[error_rd];
if (++error_rd == ARRAY_SIZE(error_fifo))
error_rd = 0;
--error_count;
return e;
}
static void do_wakeup(unsigned long);
static DECLARE_TASKLET(kmemcheck_tasklet, &do_wakeup, 0);
/*
* Save the context of an error report.
*/
void kmemcheck_error_save(enum kmemcheck_shadow state,
unsigned long address, unsigned int size, struct pt_regs *regs)
{
static unsigned long prev_ip;
struct kmemcheck_error *e;
void *shadow_copy;
void *memory_copy;
/* Don't report several adjacent errors from the same EIP. */
if (regs->ip == prev_ip)
return;
prev_ip = regs->ip;
e = error_next_wr();
if (!e)
return;
e->type = KMEMCHECK_ERROR_INVALID_ACCESS;
e->state = state;
e->address = address;
e->size = size;
/* Save regs */
memcpy(&e->regs, regs, sizeof(*regs));
/* Save stack trace */
e->trace.nr_entries = 0;
e->trace.entries = e->trace_entries;
e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
e->trace.skip = 0;
save_stack_trace_bp(&e->trace, regs->bp);
/* Round address down to the nearest SHADOW_COPY_SIZE boundary */
shadow_copy = kmemcheck_shadow_lookup(address
& ~(SHADOW_COPY_SIZE - 1));
BUG_ON(!shadow_copy);
memcpy(e->shadow_copy, shadow_copy, SHADOW_COPY_SIZE);
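	/*
	 * The page holding the faulting address is normally kept
	 * non-present by kmemcheck; show it temporarily so the actual
	 * memory contents can be copied out, then hide it again.
	 */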
kmemcheck_show_addr(address);
memory_copy = (void *) (address & ~(SHADOW_COPY_SIZE - 1));
memcpy(e->memory_copy, memory_copy, SHADOW_COPY_SIZE);
kmemcheck_hide_addr(address);
tasklet_hi_schedule_first(&kmemcheck_tasklet);
}
/*
* Save the context of a kmemcheck bug.
*/
void kmemcheck_error_save_bug(struct pt_regs *regs)
{
struct kmemcheck_error *e;
e = error_next_wr();
if (!e)
return;
e->type = KMEMCHECK_ERROR_BUG;
memcpy(&e->regs, regs, sizeof(*regs));
e->trace.nr_entries = 0;
e->trace.entries = e->trace_entries;
e->trace.max_entries = ARRAY_SIZE(e->trace_entries);
e->trace.skip = 1;
save_stack_trace(&e->trace);
tasklet_hi_schedule_first(&kmemcheck_tasklet);
}
void kmemcheck_error_recall(void)
{
static const char *desc[] = {
[KMEMCHECK_SHADOW_UNALLOCATED] = "unallocated",
[KMEMCHECK_SHADOW_UNINITIALIZED] = "uninitialized",
[KMEMCHECK_SHADOW_INITIALIZED] = "initialized",
[KMEMCHECK_SHADOW_FREED] = "freed",
};
static const char short_desc[] = {
[KMEMCHECK_SHADOW_UNALLOCATED] = 'a',
[KMEMCHECK_SHADOW_UNINITIALIZED] = 'u',
[KMEMCHECK_SHADOW_INITIALIZED] = 'i',
[KMEMCHECK_SHADOW_FREED] = 'f',
};
struct kmemcheck_error *e;
unsigned int i;
e = error_next_rd();
if (!e)
return;
switch (e->type) {
case KMEMCHECK_ERROR_INVALID_ACCESS:
printk(KERN_ERR "WARNING: kmemcheck: Caught %d-bit read "
"from %s memory (%p)\n",
8 * e->size, e->state < ARRAY_SIZE(desc) ?
desc[e->state] : "(invalid shadow state)",
(void *) e->address);
printk(KERN_INFO);
for (i = 0; i < SHADOW_COPY_SIZE; ++i)
printk("%02x", e->memory_copy[i]);
printk("\n");
printk(KERN_INFO);
for (i = 0; i < SHADOW_COPY_SIZE; ++i) {
if (e->shadow_copy[i] < ARRAY_SIZE(short_desc))
printk(" %c", short_desc[e->shadow_copy[i]]);
else
printk(" ?");
}
printk("\n");
printk(KERN_INFO "%*c\n", 2 + 2
* (int) (e->address & (SHADOW_COPY_SIZE - 1)), '^');
break;
case KMEMCHECK_ERROR_BUG:
printk(KERN_EMERG "ERROR: kmemcheck: Fatal error\n");
break;
}
__show_regs(&e->regs, 1);
print_stack_trace(&e->trace, 0);
}
static void do_wakeup(unsigned long data)
{
while (error_count > 0)
kmemcheck_error_recall();
if (error_missed_count > 0) {
printk(KERN_WARNING "kmemcheck: Lost %d error reports because "
"the queue was too small\n", error_missed_count);
error_missed_count = 0;
}
}
arch/x86/mm/kmemcheck/error.h
#ifndef ARCH__X86__MM__KMEMCHECK__ERROR_H
#define ARCH__X86__MM__KMEMCHECK__ERROR_H
#include <linux/ptrace.h>
#include "shadow.h"
void kmemcheck_error_save(enum kmemcheck_shadow state,
unsigned long address, unsigned int size, struct pt_regs *regs);
void kmemcheck_error_save_bug(struct pt_regs *regs);
void kmemcheck_error_recall(void);
#endif
arch/x86/mm/kmemcheck/kmemcheck.c
(diff collapsed in the original view)
arch/x86/mm/kmemcheck/opcode.c
#include <linux/types.h>
#include "opcode.h"
static bool opcode_is_prefix(uint8_t b)
{
return
/* Group 1 */
b == 0xf0 || b == 0xf2 || b == 0xf3
/* Group 2 */
|| b == 0x2e || b == 0x36 || b == 0x3e || b == 0x26
|| b == 0x64 || b == 0x65
/* Group 3 */
|| b == 0x66
/* Group 4 */
|| b == 0x67;
}
static bool opcode_is_rex_prefix(uint8_t b)
{
return (b & 0xf0) == 0x40;
}
#define REX_W (1 << 3)
/*
* This is a VERY crude opcode decoder. We only need to find the size of the
* load/store that caused our #PF and this should work for all the opcodes
* that we care about. Moreover, the ones who invented this instruction set
* should be shot.
*/
void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size)
{
/* Default operand size */
int operand_size_override = 4;
/* prefixes */
for (; opcode_is_prefix(*op); ++op) {
if (*op == 0x66)
operand_size_override = 2;
}
#ifdef CONFIG_X86_64
/* REX prefix */
if (opcode_is_rex_prefix(*op)) {
uint8_t rex = *op;
++op;
if (rex & REX_W) {
switch (*op) {
case 0x63:
*size = 4;
return;
case 0x0f:
++op;
switch (*op) {
case 0xb6:
case 0xbe:
*size = 1;
return;
case 0xb7:
case 0xbf:
*size = 2;
return;
}
break;
}
*size = 8;
return;
}
}
#endif
/* escape opcode */
if (*op == 0x0f) {
++op;
/*
 * 0xb7 and 0xbf are movzx/movsx with a 16-bit source operand.
 * The byte variants (0xb6/0xbe) need no special case here, because
 * the low-bit test below already gives them size 1.
 */
if (*op == 0xb7 || *op == 0xbf)
operand_size_override = 2;
}
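	/*
	 * For most mov/ALU encodings, bit 0 of the primary opcode
	 * selects between the byte form and the full-size form.
	 */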
*size = (*op & 1) ? operand_size_override : 1;
}
const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op)
{
/* skip prefixes */
while (opcode_is_prefix(*op))
++op;
if (opcode_is_rex_prefix(*op))
++op;
return op;
}
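A hypothetical sketch of the decoder's behaviour (not part of the patch; imagine it appended to opcode.c as a self-test):

static void opcode_decode_selftest(void)
{
	const uint8_t mov_r32[] = { 0x8b, 0x00 };	/* mov eax, [eax] */
	const uint8_t mov_r16[] = { 0x66, 0x8b, 0x00 };	/* mov ax, [eax] */
	const uint8_t mov_r8[]  = { 0x8a, 0x00 };	/* mov al, [eax] */
	unsigned int size;

	kmemcheck_opcode_decode(mov_r32, &size);	/* size == 4 */
	kmemcheck_opcode_decode(mov_r16, &size);	/* size == 2 (0x66 prefix) */
	kmemcheck_opcode_decode(mov_r8, &size);		/* size == 1 (low bit clear) */
}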
arch/x86/mm/kmemcheck/opcode.h
#ifndef ARCH__X86__MM__KMEMCHECK__OPCODE_H
#define ARCH__X86__MM__KMEMCHECK__OPCODE_H
#include <linux/types.h>
void kmemcheck_opcode_decode(const uint8_t *op, unsigned int *size);
const uint8_t *kmemcheck_opcode_get_primary(const uint8_t *op);
#endif
arch/x86/mm/kmemcheck/pte.c
#include <linux/mm.h>
#include <asm/pgtable.h>
#include "pte.h"
pte_t *kmemcheck_pte_lookup(unsigned long address)
{
pte_t *pte;
unsigned int level;
pte = lookup_address(address, &level);
if (!pte)
return NULL;
if (level != PG_LEVEL_4K)
return NULL;
if (!pte_hidden(*pte))
return NULL;
return pte;
}
arch/x86/mm/kmemcheck/pte.h
#ifndef ARCH__X86__MM__KMEMCHECK__PTE_H
#define ARCH__X86__MM__KMEMCHECK__PTE_H
#include <linux/mm.h>
#include <asm/pgtable.h>
pte_t *kmemcheck_pte_lookup(unsigned long address);
#endif
arch/x86/mm/kmemcheck/shadow.c
#include <linux/kmemcheck.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include "pte.h"
#include "shadow.h"
/*
* Return the shadow address for the given address. Returns NULL if the
* address is not tracked.
*
* We need to be extremely careful not to follow any invalid pointers,
* because this function can be called for *any* possible address.
*/
void *kmemcheck_shadow_lookup(unsigned long address)
{
pte_t *pte;
struct page *page;
if (!virt_addr_valid(address))
return NULL;
pte = kmemcheck_pte_lookup(address);
if (!pte)
return NULL;
page = virt_to_page(address);
if (!page->shadow)
return NULL;
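	/*
	 * Shadow bytes map 1:1 to data bytes at the same page offset,
	 * so index the shadow page with the low bits of the address.
	 */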
return page->shadow + (address & (PAGE_SIZE - 1));
}
static void mark_shadow(void *address, unsigned int n,
enum kmemcheck_shadow status)
{
unsigned long addr = (unsigned long) address;
unsigned long last_addr = addr + n - 1;
unsigned long page = addr & PAGE_MASK;
unsigned long last_page = last_addr & PAGE_MASK;
unsigned int first_n;
void *shadow;
/* If the memory range crosses a page boundary, stop there. */
if (page == last_page)
first_n = n;
else
first_n = page + PAGE_SIZE - addr;
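	/*
	 * Example: with 4 KiB pages, addr == 0x1ff8 and n == 16 give
	 * first_n == 8: the first memset() below covers the tail of
	 * the first page, and the loop and tail handle the remainder.
	 */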
shadow = kmemcheck_shadow_lookup(addr);
if (shadow)
memset(shadow, status, first_n);
addr += first_n;
n -= first_n;
/* Do full-page memset()s. */
while (n >= PAGE_SIZE) {
shadow = kmemcheck_shadow_lookup(addr);
if (shadow)
memset(shadow, status, PAGE_SIZE);
addr += PAGE_SIZE;
n -= PAGE_SIZE;
}
/* Do the remaining page, if any. */
if (n > 0) {
shadow = kmemcheck_shadow_lookup(addr);
if (shadow)
memset(shadow, status, n);
}
}
void kmemcheck_mark_unallocated(void *address, unsigned int n)
{
mark_shadow(address, n, KMEMCHECK_SHADOW_UNALLOCATED);
}
void kmemcheck_mark_uninitialized(void *address, unsigned int n)
{
mark_shadow(address, n, KMEMCHECK_SHADOW_UNINITIALIZED);
}
/*
* Fill the shadow memory of the given address such that the memory at that
* address is marked as being initialized.
*/
void kmemcheck_mark_initialized(void *address, unsigned int n)
{
mark_shadow(address, n, KMEMCHECK_SHADOW_INITIALIZED);
}
EXPORT_SYMBOL_GPL(kmemcheck_mark_initialized);
void kmemcheck_mark_freed(void *address, unsigned int n)
{
mark_shadow(address, n, KMEMCHECK_SHADOW_FREED);
}
void kmemcheck_mark_unallocated_pages(struct page *p, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
kmemcheck_mark_unallocated(page_address(&p[i]), PAGE_SIZE);
}
void kmemcheck_mark_uninitialized_pages(struct page *p, unsigned int n)
{
unsigned int i;
for (i = 0; i < n; ++i)
kmemcheck_mark_uninitialized(page_address(&p[i]), PAGE_SIZE);
}
enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size)
{
uint8_t *x;
unsigned int i;
x = shadow;
#ifdef CONFIG_KMEMCHECK_PARTIAL_OK
/*
* Make sure _some_ bytes are initialized. Gcc frequently generates
* code to access neighboring bytes.
*/
for (i = 0; i < size; ++i) {
if (x[i] == KMEMCHECK_SHADOW_INITIALIZED)
return x[i];
}
#else
/* All bytes must be initialized. */
for (i = 0; i < size; ++i) {
if (x[i] != KMEMCHECK_SHADOW_INITIALIZED)
return x[i];
}
#endif
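	/*
	 * Either every byte passed the check above or none did; in
	 * both cases x[0] is returned as representative of the range.
	 */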
return x[0];
}
void kmemcheck_shadow_set(void *shadow, unsigned int size)
{
uint8_t *x;
unsigned int i;
x = shadow;
for (i = 0; i < size; ++i)
x[i] = KMEMCHECK_SHADOW_INITIALIZED;
}
arch/x86/mm/kmemcheck/shadow.h
#ifndef ARCH__X86__MM__KMEMCHECK__SHADOW_H
#define ARCH__X86__MM__KMEMCHECK__SHADOW_H
enum kmemcheck_shadow {
KMEMCHECK_SHADOW_UNALLOCATED,
KMEMCHECK_SHADOW_UNINITIALIZED,
KMEMCHECK_SHADOW_INITIALIZED,
KMEMCHECK_SHADOW_FREED,
};
void *kmemcheck_shadow_lookup(unsigned long address);
enum kmemcheck_shadow kmemcheck_shadow_test(void *shadow, unsigned int size);
void kmemcheck_shadow_set(void *shadow, unsigned int size);
#endif
include/linux/kmemcheck.h
#ifndef LINUX_KMEMCHECK_H
#define LINUX_KMEMCHECK_H
#include <linux/mm_types.h>
#include <linux/types.h>
#ifdef CONFIG_KMEMCHECK
extern int kmemcheck_enabled;
int kmemcheck_show_addr(unsigned long address);
int kmemcheck_hide_addr(unsigned long address);
#else
#define kmemcheck_enabled 0
#endif /* CONFIG_KMEMCHECK */
#endif /* LINUX_KMEMCHECK_H */
include/linux/mm_types.h
@@ -98,6 +98,14 @@ struct page {
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
unsigned long debug_flags; /* Use atomic bitops on this */
#endif
#ifdef CONFIG_KMEMCHECK
/*
* kmemcheck wants to track the status of each byte in a page; this
* is a pointer to such a status block. NULL if not tracked.
*/
void *shadow;
#endif
};
/*
init/main.c
@@ -65,6 +65,7 @@
#include <linux/idr.h>
#include <linux/ftrace.h>
#include <linux/async.h>
#include <linux/kmemcheck.h>
#include <linux/kmemtrace.h>
#include <trace/boot.h>
kernel/sysctl.c
@@ -27,6 +27,7 @@
#include <linux/security.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
#include <linux/kmemcheck.h>
#include <linux/smp_lock.h>
#include <linux/fs.h>
#include <linux/init.h>
@@ -959,6 +960,17 @@ static struct ctl_table kern_table[] = {
.proc_handler = &proc_dointvec,
},
#endif
#ifdef CONFIG_KMEMCHECK
{
.ctl_name = CTL_UNNUMBERED,
.procname = "kmemcheck",
.data = &kmemcheck_enabled,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = &proc_dointvec,
},
#endif
/*
* NOTE: do not add new entries to this table unless you have read
* Documentation/sysctl/ctl_unnumbered.txt
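With this entry in place, kmemcheck can be toggled at run time through /proc/sys/kernel/kmemcheck: the integer written lands in kmemcheck_enabled (0 is expected to disable reporting and 1 to enable it; the exact semantics live in the collapsed kmemcheck.c above).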