Commit 2a7fbcec authored by Greg Kroah-Hartman

Merge tag 'lkdtm-next' of...

Merge tag 'lkdtm-next' of git://git.kernel.org/pub/scm/linux/kernel/git/kees/linux into char-misc-testing

Kees writes:

refactoring for multiple source files and better layout
parents e2402b1d c479e3fd
@@ -59,7 +59,11 @@ obj-$(CONFIG_CXL_BASE) += cxl/
obj-$(CONFIG_PANEL) += panel.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_core.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_bugs.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_heap.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_perms.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_rodata_objcopy.o
lkdtm-$(CONFIG_LKDTM) += lkdtm_usercopy.o
OBJCOPYFLAGS :=
OBJCOPYFLAGS_lkdtm_rodata_objcopy.o := \
#ifndef __LKDTM_H
#define __LKDTM_H
/* lkdtm_bugs.c */
void __init lkdtm_bugs_init(int *recur_param);
void lkdtm_PANIC(void);
void lkdtm_BUG(void);
void lkdtm_WARNING(void);
void lkdtm_EXCEPTION(void);
void lkdtm_LOOP(void);
void lkdtm_OVERFLOW(void);
void lkdtm_CORRUPT_STACK(void);
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void);
void lkdtm_SOFTLOCKUP(void);
void lkdtm_HARDLOCKUP(void);
void lkdtm_SPINLOCKUP(void);
void lkdtm_HUNG_TASK(void);
void lkdtm_ATOMIC_UNDERFLOW(void);
void lkdtm_ATOMIC_OVERFLOW(void);
/* lkdtm_heap.c */
void lkdtm_OVERWRITE_ALLOCATION(void);
void lkdtm_WRITE_AFTER_FREE(void);
void lkdtm_READ_AFTER_FREE(void);
void lkdtm_WRITE_BUDDY_AFTER_FREE(void);
void lkdtm_READ_BUDDY_AFTER_FREE(void);
/* lkdtm_perms.c */
void __init lkdtm_perms_init(void);
void lkdtm_WRITE_RO(void);
void lkdtm_WRITE_RO_AFTER_INIT(void);
void lkdtm_WRITE_KERN(void);
void lkdtm_EXEC_DATA(void);
void lkdtm_EXEC_STACK(void);
void lkdtm_EXEC_KMALLOC(void);
void lkdtm_EXEC_VMALLOC(void);
void lkdtm_EXEC_RODATA(void);
void lkdtm_EXEC_USERSPACE(void);
void lkdtm_ACCESS_USERSPACE(void);
/* lkdtm_rodata.c */
void lkdtm_rodata_do_nothing(void);
/* lkdtm_usercopy.c */
void __init lkdtm_usercopy_init(void);
void __exit lkdtm_usercopy_exit(void);
void lkdtm_USERCOPY_HEAP_SIZE_TO(void);
void lkdtm_USERCOPY_HEAP_SIZE_FROM(void);
void lkdtm_USERCOPY_HEAP_FLAG_TO(void);
void lkdtm_USERCOPY_HEAP_FLAG_FROM(void);
void lkdtm_USERCOPY_STACK_FRAME_TO(void);
void lkdtm_USERCOPY_STACK_FRAME_FROM(void);
void lkdtm_USERCOPY_STACK_BEYOND(void);
void lkdtm_USERCOPY_KERNEL(void);
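/*
* Layout sketch: each test is declared here, implemented in the matching
* lkdtm_*.c file named in the comments above, and wired into the
* crashtypes[] table in lkdtm_core.c via CRASHTYPE(), so adding a new
* test touches exactly those three places.
*/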
#endif
/*
* This is for all the tests related to logic bugs (e.g. bad dereferences,
* bad alignment, bad loops, bad locking, bad scheduling, deep stacks, and
* lockups) along with other things that don't fit well into existing LKDTM
* test source files.
*/
#define pr_fmt(fmt) "lkdtm: " fmt
#include <linux/kernel.h>
#include <linux/sched.h>
#include "lkdtm.h"
/*
* Make sure our attempts to overrun the kernel stack don't trigger
* a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
* recurse past the end of THREAD_SIZE by default.
*/
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
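/*
* Worked example (illustrative numbers, not mandated by any particular
* config): with THREAD_SIZE = 16 KiB and CONFIG_FRAME_WARN = 1024,
* REC_STACK_SIZE is 1024 / 2 = 512 bytes and REC_NUM_DEFAULT is
* (16384 / 512) * 2 = 64, i.e. twice as many recursive frames as fit
* in the stack.
*/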
static int recur_count = REC_NUM_DEFAULT;
static DEFINE_SPINLOCK(lock_me_up);
static int recursive_loop(int remaining)
{
char buf[REC_STACK_SIZE];
/* Make sure compiler does not optimize this away. */
memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
if (!remaining)
return 0;
else
return recursive_loop(remaining - 1);
}
/* If the depth is negative, use the default; otherwise, keep the parameter. */
void __init lkdtm_bugs_init(int *recur_param)
{
if (*recur_param < 0)
*recur_param = recur_count;
else
recur_count = *recur_param;
}
void lkdtm_PANIC(void)
{
panic("dumptest");
}
void lkdtm_BUG(void)
{
BUG();
}
void lkdtm_WARNING(void)
{
WARN_ON(1);
}
void lkdtm_EXCEPTION(void)
{
*((int *) 0) = 0;
}
void lkdtm_LOOP(void)
{
for (;;)
;
}
void lkdtm_OVERFLOW(void)
{
(void) recursive_loop(recur_count);
}
noinline void lkdtm_CORRUPT_STACK(void)
{
/* Use default char array length that triggers stack protection. */
char data[8];
memset((void *)data, 0, 64);
}
void lkdtm_UNALIGNED_LOAD_STORE_WRITE(void)
{
static u8 data[5] __attribute__((aligned(4))) = {1, 2, 3, 4, 5};
u32 *p;
u32 val = 0x12345678;
p = (u32 *)(data + 1);
if (*p == 0)
val = 0x87654321;
*p = val;
}
void lkdtm_SOFTLOCKUP(void)
{
preempt_disable();
for (;;)
cpu_relax();
}
void lkdtm_HARDLOCKUP(void)
{
local_irq_disable();
for (;;)
cpu_relax();
}
void lkdtm_SPINLOCKUP(void)
{
/* Must be called twice to trigger: the second call deadlocks on the lock still held from the first. */
spin_lock(&lock_me_up);
/* Let sparse know we intended to exit holding the lock. */
__release(&lock_me_up);
}
void lkdtm_HUNG_TASK(void)
{
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
}
void lkdtm_ATOMIC_UNDERFLOW(void)
{
atomic_t under = ATOMIC_INIT(INT_MIN);
pr_info("attempting good atomic increment\n");
atomic_inc(&under);
atomic_dec(&under);
pr_info("attempting bad atomic underflow\n");
atomic_dec(&under);
}
void lkdtm_ATOMIC_OVERFLOW(void)
{
atomic_t over = ATOMIC_INIT(INT_MAX);
pr_info("attempting good atomic decrement\n");
atomic_dec(&over);
atomic_inc(&over);
pr_info("attempting bad atomic overflow\n");
atomic_inc(&over);
}
@@ -44,9 +44,6 @@
#include <linux/slab.h>
#include <scsi/scsi_cmnd.h>
#include <linux/debugfs.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_IDE
#include <linux/ide.h>
@@ -54,164 +51,20 @@
#include "lkdtm.h"
/*
* Make sure our attempts to overrun the kernel stack don't trigger
* a compiler warning when CONFIG_FRAME_WARN is set. Then make sure we
* recurse past the end of THREAD_SIZE by default.
*/
#if defined(CONFIG_FRAME_WARN) && (CONFIG_FRAME_WARN > 0)
#define REC_STACK_SIZE (CONFIG_FRAME_WARN / 2)
#else
#define REC_STACK_SIZE (THREAD_SIZE / 8)
#endif
#define REC_NUM_DEFAULT ((THREAD_SIZE / REC_STACK_SIZE) * 2)
#define DEFAULT_COUNT 10
#define EXEC_SIZE 64
enum cname {
CN_INVALID,
CN_INT_HARDWARE_ENTRY,
CN_INT_HW_IRQ_EN,
CN_INT_TASKLET_ENTRY,
CN_FS_DEVRW,
CN_MEM_SWAPOUT,
CN_TIMERADD,
CN_SCSI_DISPATCH_CMD,
CN_IDE_CORE_CP,
CN_DIRECT,
};
enum ctype {
CT_NONE,
CT_PANIC,
CT_BUG,
CT_WARNING,
CT_EXCEPTION,
CT_LOOP,
CT_OVERFLOW,
CT_CORRUPT_STACK,
CT_UNALIGNED_LOAD_STORE_WRITE,
CT_OVERWRITE_ALLOCATION,
CT_WRITE_AFTER_FREE,
CT_READ_AFTER_FREE,
CT_WRITE_BUDDY_AFTER_FREE,
CT_READ_BUDDY_AFTER_FREE,
CT_SOFTLOCKUP,
CT_HARDLOCKUP,
CT_SPINLOCKUP,
CT_HUNG_TASK,
CT_EXEC_DATA,
CT_EXEC_STACK,
CT_EXEC_KMALLOC,
CT_EXEC_VMALLOC,
CT_EXEC_RODATA,
CT_EXEC_USERSPACE,
CT_ACCESS_USERSPACE,
CT_WRITE_RO,
CT_WRITE_RO_AFTER_INIT,
CT_WRITE_KERN,
CT_ATOMIC_UNDERFLOW,
CT_ATOMIC_OVERFLOW,
CT_USERCOPY_HEAP_SIZE_TO,
CT_USERCOPY_HEAP_SIZE_FROM,
CT_USERCOPY_HEAP_FLAG_TO,
CT_USERCOPY_HEAP_FLAG_FROM,
CT_USERCOPY_STACK_FRAME_TO,
CT_USERCOPY_STACK_FRAME_FROM,
CT_USERCOPY_STACK_BEYOND,
};
static char* cp_name[] = {
"INT_HARDWARE_ENTRY",
"INT_HW_IRQ_EN",
"INT_TASKLET_ENTRY",
"FS_DEVRW",
"MEM_SWAPOUT",
"TIMERADD",
"SCSI_DISPATCH_CMD",
"IDE_CORE_CP",
"DIRECT",
};
static char* cp_type[] = {
"PANIC",
"BUG",
"WARNING",
"EXCEPTION",
"LOOP",
"OVERFLOW",
"CORRUPT_STACK",
"UNALIGNED_LOAD_STORE_WRITE",
"OVERWRITE_ALLOCATION",
"WRITE_AFTER_FREE",
"READ_AFTER_FREE",
"WRITE_BUDDY_AFTER_FREE",
"READ_BUDDY_AFTER_FREE",
"SOFTLOCKUP",
"HARDLOCKUP",
"SPINLOCKUP",
"HUNG_TASK",
"EXEC_DATA",
"EXEC_STACK",
"EXEC_KMALLOC",
"EXEC_VMALLOC",
"EXEC_RODATA",
"EXEC_USERSPACE",
"ACCESS_USERSPACE",
"WRITE_RO",
"WRITE_RO_AFTER_INIT",
"WRITE_KERN",
"ATOMIC_UNDERFLOW",
"ATOMIC_OVERFLOW",
"USERCOPY_HEAP_SIZE_TO",
"USERCOPY_HEAP_SIZE_FROM",
"USERCOPY_HEAP_FLAG_TO",
"USERCOPY_HEAP_FLAG_FROM",
"USERCOPY_STACK_FRAME_TO",
"USERCOPY_STACK_FRAME_FROM",
"USERCOPY_STACK_BEYOND",
};
static struct jprobe lkdtm;
static int lkdtm_parse_commandline(void);
static void lkdtm_handler(void);
static int lkdtm_debugfs_open(struct inode *inode, struct file *file);
static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
size_t count, loff_t *off);
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off);
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off);
static char* cpoint_name;
static char* cpoint_type;
static int cpoint_count = DEFAULT_COUNT;
static int recur_count = REC_NUM_DEFAULT;
static int alloc_size = 1024;
static size_t cache_size;
static enum cname cpoint = CN_INVALID;
static enum ctype cptype = CT_NONE;
static int count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(count_lock);
static DEFINE_SPINLOCK(lock_me_up);
static u8 data_area[EXEC_SIZE];
static struct kmem_cache *bad_cache;
static const unsigned char test_text[] = "This is a test.\n";
static const unsigned long rodata = 0xAA55AA55;
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
"hitting the crash point");
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
module_param(alloc_size, int, 0644);
MODULE_PARM_DESC(alloc_size, " Size of allocation for user copy tests "\
"(from 1 to PAGE_SIZE)");
/* jprobe entry point handlers. */
static unsigned int jp_do_irq(unsigned int irq)
{
lkdtm_handler();
@@ -276,780 +129,220 @@ static int jp_generic_ide_ioctl(ide_drive_t *drive, struct file *file,
}
#endif
/* Return the crash type number or CT_NONE if the name is invalid */
static enum ctype parse_cp_type(const char *what, size_t count)
{
int i;
for (i = 0; i < ARRAY_SIZE(cp_type); i++) {
if (!strcmp(what, cp_type[i]))
return i + 1;
}
return CT_NONE;
}
static const char *cp_type_to_str(enum ctype type)
{
if (type == CT_NONE || type < 0 || type > ARRAY_SIZE(cp_type))
return "None";
return cp_type[type - 1];
}
static const char *cp_name_to_str(enum cname name)
{
if (name == CN_INVALID || name < 0 || name > ARRAY_SIZE(cp_name))
return "INVALID";
return cp_name[name - 1];
}
static int lkdtm_parse_commandline(void)
{
int i;
unsigned long flags;
if (cpoint_count < 1 || recur_count < 1)
return -EINVAL;
spin_lock_irqsave(&count_lock, flags);
count = cpoint_count;
spin_unlock_irqrestore(&count_lock, flags);
/* No special parameters */
if (!cpoint_type && !cpoint_name)
return 0;
/* Neither or both of these need to be set */
if (!cpoint_type || !cpoint_name)
return -EINVAL;
cptype = parse_cp_type(cpoint_type, strlen(cpoint_type));
if (cptype == CT_NONE)
return -EINVAL;
for (i = 0; i < ARRAY_SIZE(cp_name); i++) {
if (!strcmp(cpoint_name, cp_name[i])) {
cpoint = i + 1;
return 0;
}
}
/* Could not find a valid crash point */
return -EINVAL;
}
static int recursive_loop(int remaining)
{
char buf[REC_STACK_SIZE];
/* Make sure compiler does not optimize this away. */
memset(buf, (remaining & 0xff) | 0x1, REC_STACK_SIZE);
if (!remaining)
return 0;
else
return recursive_loop(remaining - 1);
}
static void do_nothing(void)
{
return;
}
/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
pr_info("do_overwritten wasn't overwritten!\n");
return;
}
/* Crash points */
struct crashpoint {
const char *name;
const struct file_operations fops;
struct jprobe jprobe;
};
static noinline void corrupt_stack(void)
{
/* Use default char array length that triggers stack protection. */
char data[8];
#define CRASHPOINT(_name, _write, _symbol, _entry) \
{ \
.name = _name, \
.fops = { \
.read = lkdtm_debugfs_read, \
.llseek = generic_file_llseek, \
.open = lkdtm_debugfs_open, \
.write = _write, \
}, \
.jprobe = { \
.kp.symbol_name = _symbol, \
.entry = (kprobe_opcode_t *)_entry, \
}, \
}
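/*
* Expansion sketch: CRASHPOINT("DIRECT", direct_entry, NULL, NULL) yields
* a crashpoint whose debugfs file is written through direct_entry() and
* whose jprobe carries no symbol name, which lkdtm_register_cpoint()
* later treats as "run the crashtype immediately".
*/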
/* Define the possible places where we can trigger a crash point. */
struct crashpoint crashpoints[] = {
CRASHPOINT("DIRECT", direct_entry,
NULL, NULL),
#ifdef CONFIG_KPROBES
CRASHPOINT("INT_HARDWARE_ENTRY", lkdtm_debugfs_entry,
"do_IRQ", jp_do_irq),
CRASHPOINT("INT_HW_IRQ_EN", lkdtm_debugfs_entry,
"handle_IRQ_event", jp_handle_irq_event),
CRASHPOINT("INT_TASKLET_ENTRY", lkdtm_debugfs_entry,
"tasklet_action", jp_tasklet_action),
CRASHPOINT("FS_DEVRW", lkdtm_debugfs_entry,
"ll_rw_block", jp_ll_rw_block),
CRASHPOINT("MEM_SWAPOUT", lkdtm_debugfs_entry,
"shrink_inactive_list", jp_shrink_inactive_list),
CRASHPOINT("TIMERADD", lkdtm_debugfs_entry,
"hrtimer_start", jp_hrtimer_start),
CRASHPOINT("SCSI_DISPATCH_CMD", lkdtm_debugfs_entry,
"scsi_dispatch_cmd", jp_scsi_dispatch_cmd),
# ifdef CONFIG_IDE
CRASHPOINT("IDE_CORE_CP", lkdtm_debugfs_entry,
"generic_ide_ioctl", jp_generic_ide_ioctl),
# endif
#endif
};
memset((void *)data, 0, 64);
}
static noinline void execute_location(void *dst, bool write)
{
void (*func)(void) = dst;
/* Crash types. */
struct crashtype {
const char *name;
void (*func)(void);
};
pr_info("attempting ok execution at %p\n", do_nothing);
do_nothing();
#define CRASHTYPE(_name) \
{ \
.name = __stringify(_name), \
.func = lkdtm_ ## _name, \
}
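/*
* Expansion sketch: CRASHTYPE(PANIC) becomes
* { .name = "PANIC", .func = lkdtm_PANIC }, so __stringify() and token
* pasting keep each debugfs-visible name and its handler in lockstep.
*/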
/* Define the possible types of crashes that can be triggered. */
struct crashtype crashtypes[] = {
CRASHTYPE(PANIC),
CRASHTYPE(BUG),
CRASHTYPE(WARNING),
CRASHTYPE(EXCEPTION),
CRASHTYPE(LOOP),
CRASHTYPE(OVERFLOW),
CRASHTYPE(CORRUPT_STACK),
CRASHTYPE(UNALIGNED_LOAD_STORE_WRITE),
CRASHTYPE(OVERWRITE_ALLOCATION),
CRASHTYPE(WRITE_AFTER_FREE),
CRASHTYPE(READ_AFTER_FREE),
CRASHTYPE(WRITE_BUDDY_AFTER_FREE),
CRASHTYPE(READ_BUDDY_AFTER_FREE),
CRASHTYPE(SOFTLOCKUP),
CRASHTYPE(HARDLOCKUP),
CRASHTYPE(SPINLOCKUP),
CRASHTYPE(HUNG_TASK),
CRASHTYPE(EXEC_DATA),
CRASHTYPE(EXEC_STACK),
CRASHTYPE(EXEC_KMALLOC),
CRASHTYPE(EXEC_VMALLOC),
CRASHTYPE(EXEC_RODATA),
CRASHTYPE(EXEC_USERSPACE),
CRASHTYPE(ACCESS_USERSPACE),
CRASHTYPE(WRITE_RO),
CRASHTYPE(WRITE_RO_AFTER_INIT),
CRASHTYPE(WRITE_KERN),
CRASHTYPE(ATOMIC_UNDERFLOW),
CRASHTYPE(ATOMIC_OVERFLOW),
CRASHTYPE(USERCOPY_HEAP_SIZE_TO),
CRASHTYPE(USERCOPY_HEAP_SIZE_FROM),
CRASHTYPE(USERCOPY_HEAP_FLAG_TO),
CRASHTYPE(USERCOPY_HEAP_FLAG_FROM),
CRASHTYPE(USERCOPY_STACK_FRAME_TO),
CRASHTYPE(USERCOPY_STACK_FRAME_FROM),
CRASHTYPE(USERCOPY_STACK_BEYOND),
CRASHTYPE(USERCOPY_KERNEL),
};
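/*
* Usage sketch (assuming debugfs is mounted at /sys/kernel/debug):
*
*   # cat /sys/kernel/debug/provoke-crash/DIRECT    # list crash types
*   # echo BUG > /sys/kernel/debug/provoke-crash/DIRECT
*
* Writing a crashtype name to DIRECT runs it immediately via
* direct_entry(); writing to one of the kprobe-backed files arms that
* crashpoint instead.
*/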
if (write) {
memcpy(dst, do_nothing, EXEC_SIZE);
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
pr_info("attempting bad execution at %p\n", func);
func();
}
static void execute_user_location(void *dst)
{
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
/* Global jprobe entry and crashtype. */
static struct jprobe *lkdtm_jprobe;
struct crashpoint *lkdtm_crashpoint;
struct crashtype *lkdtm_crashtype;
pr_info("attempting ok execution at %p\n", do_nothing);
do_nothing();
/* Global crash counter and spinlock. */
static int crash_count = DEFAULT_COUNT;
static DEFINE_SPINLOCK(crash_count_lock);
if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
return;
flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
pr_info("attempting bad execution at %p\n", func);
func();
}
/* Module parameters */
static int recur_count = -1;
module_param(recur_count, int, 0644);
MODULE_PARM_DESC(recur_count, " Recursion level for the stack overflow test");
/*
* Instead of adding -Wno-return-local-addr, just pass the stack address
* through a function to obfuscate it from the compiler.
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
return stack + 0;
}
static char* cpoint_name;
module_param(cpoint_name, charp, 0444);
MODULE_PARM_DESC(cpoint_name, " Crash Point, where kernel is to be crashed");
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
unsigned char buf[32];
int i;
static char* cpoint_type;
module_param(cpoint_type, charp, 0444);
MODULE_PARM_DESC(cpoint_type, " Crash Point Type, action to be taken on "\
"hitting the crash point");
/* Exercise stack to avoid everything living in registers. */
for (i = 0; i < sizeof(buf); i++) {
buf[i] = value & 0xff;
}
static int cpoint_count = DEFAULT_COUNT;
module_param(cpoint_count, int, 0644);
MODULE_PARM_DESC(cpoint_count, " Crash Point Count, number of times the "\
"crash point is to be hit to trigger action");
return trick_compiler(buf);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
/* Return the matching crashtype or NULL if the name is invalid */
static struct crashtype *find_crashtype(const char *name)
{
unsigned long user_addr;
unsigned char good_stack[32];
unsigned char *bad_stack;
int i;
/* Exercise stack to avoid everything living in registers. */
for (i = 0; i < sizeof(good_stack); i++)
good_stack[i] = test_text[i % sizeof(test_text)];
/* This is a pointer to outside our current stack frame. */
if (bad_frame) {
bad_stack = do_usercopy_stack_callee(alloc_size);
} else {
/* Put start address just inside stack. */
bad_stack = task_stack_page(current) + THREAD_SIZE;
bad_stack -= sizeof(unsigned long);
}
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
if (to_user) {
pr_info("attempting good copy_to_user of local stack\n");
if (copy_to_user((void __user *)user_addr, good_stack,
sizeof(good_stack))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of distant stack\n");
if (copy_to_user((void __user *)user_addr, bad_stack,
sizeof(good_stack))) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
/*
* There isn't a safe way to not be protected by usercopy
* if we're going to write to another thread's stack.
*/
if (!bad_frame)
goto free_user;
pr_info("attempting good copy_from_user of local stack\n");
if (copy_from_user(good_stack, (void __user *)user_addr,
sizeof(good_stack))) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of distant stack\n");
if (copy_from_user(bad_stack, (void __user *)user_addr,
sizeof(good_stack))) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
static void do_usercopy_heap_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
size_t size = clamp_t(int, alloc_size, 1, PAGE_SIZE);
one = kmalloc(size, GFP_KERNEL);
two = kmalloc(size, GFP_KERNEL);
if (!one || !two) {
pr_warn("Failed to allocate kernel memory\n");
goto free_kernel;
}
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_kernel;
}
memset(one, 'A', size);
memset(two, 'B', size);
if (to_user) {
pr_info("attempting good copy_to_user of correct size\n");
if (copy_to_user((void __user *)user_addr, one, size)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of too large size\n");
if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user of correct size\n");
if (copy_from_user(one, (void __user *)user_addr,
size)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
if (!strcmp(name, crashtypes[i].name))
return &crashtypes[i];
}
pr_info("attempting bad copy_from_user of too large size\n");
if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
kfree(one);
kfree(two);
return NULL;
}
static void do_usercopy_heap_flag(bool to_user)
{
unsigned long user_addr;
unsigned char *good_buf = NULL;
unsigned char *bad_buf = NULL;
/* Make sure cache was prepared. */
if (!bad_cache) {
pr_warn("Failed to allocate kernel cache\n");
return;
}
/*
* Allocate one buffer from each cache (kmalloc will have the
* SLAB_USERCOPY flag already, but "bad_cache" won't).
/*
* This is forced noinline just so it distinctly shows up in the stackdump
* which makes validation of expected lkdtm crashes easier.
*/
good_buf = kmalloc(cache_size, GFP_KERNEL);
bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
if (!good_buf || !bad_buf) {
pr_warn("Failed to allocate buffers from caches\n");
goto free_alloc;
}
/* Allocate user memory we'll poke at. */
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_alloc;
}
memset(good_buf, 'A', cache_size);
memset(bad_buf, 'B', cache_size);
if (to_user) {
pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
if (copy_to_user((void __user *)user_addr, good_buf,
cache_size)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
if (copy_to_user((void __user *)user_addr, bad_buf,
cache_size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
if (copy_from_user(good_buf, (void __user *)user_addr,
cache_size)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
if (copy_from_user(bad_buf, (void __user *)user_addr,
cache_size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
if (bad_buf)
kmem_cache_free(bad_cache, bad_buf);
kfree(good_buf);
}
static void lkdtm_do_action(enum ctype which)
static noinline void lkdtm_do_action(struct crashtype *crashtype)
{
switch (which) {
case CT_PANIC:
panic("dumptest");
break;
case CT_BUG:
BUG();
break;
case CT_WARNING:
WARN_ON(1);
break;
case CT_EXCEPTION:
*((int *) 0) = 0;
break;
case CT_LOOP:
for (;;)
;
break;
case CT_OVERFLOW:
(void) recursive_loop(recur_count);
break;
case CT_CORRUPT_STACK:
corrupt_stack();
break;
case CT_UNALIGNED_LOAD_STORE_WRITE: {
static u8 data[5] __attribute__((aligned(4))) = {1, 2,
3, 4, 5};
u32 *p;
u32 val = 0x12345678;
p = (u32 *)(data + 1);
if (*p == 0)
val = 0x87654321;
*p = val;
break;
}
case CT_OVERWRITE_ALLOCATION: {
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
break;
}
case CT_WRITE_AFTER_FREE: {
int *base, *again;
size_t len = 1024;
/*
* The slub allocator uses the first word to store the free
* pointer in some configurations. Use the middle of the
* allocation to avoid running into the freelist
*/
size_t offset = (len / sizeof(*base)) / 2;
base = kmalloc(len, GFP_KERNEL);
pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
pr_info("Attempting bad write to freed memory at %p\n",
&base[offset]);
kfree(base);
base[offset] = 0x0abcdef0;
/* Attempt to notice the overwrite. */
again = kmalloc(len, GFP_KERNEL);
kfree(again);
if (again != base)
pr_info("Hmm, didn't get the same memory range.\n");
break;
}
case CT_READ_AFTER_FREE: {
int *base, *val, saw;
size_t len = 1024;
/*
* The slub allocator uses the first word to store the free
* pointer in some configurations. Use the middle of the
* allocation to avoid running into the freelist
*/
size_t offset = (len / sizeof(*base)) / 2;
base = kmalloc(len, GFP_KERNEL);
if (!base)
break;
val = kmalloc(len, GFP_KERNEL);
if (!val) {
kfree(base);
break;
}
*val = 0x12345678;
base[offset] = *val;
pr_info("Value in memory before free: %x\n", base[offset]);
kfree(base);
pr_info("Attempting bad read from freed memory\n");
saw = base[offset];
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
BUG();
}
pr_info("Memory was not poisoned\n");
kfree(val);
break;
}
case CT_WRITE_BUDDY_AFTER_FREE: {
unsigned long p = __get_free_page(GFP_KERNEL);
if (!p)
break;
pr_info("Writing to the buddy page before free\n");
memset((void *)p, 0x3, PAGE_SIZE);
free_page(p);
schedule();
pr_info("Attempting bad write to the buddy page after free\n");
memset((void *)p, 0x78, PAGE_SIZE);
/* Attempt to notice the overwrite. */
p = __get_free_page(GFP_KERNEL);
free_page(p);
schedule();
break;
}
case CT_READ_BUDDY_AFTER_FREE: {
unsigned long p = __get_free_page(GFP_KERNEL);
int saw, *val;
int *base;
if (!p)
break;
val = kmalloc(1024, GFP_KERNEL);
if (!val) {
free_page(p);
break;
}
base = (int *)p;
*val = 0x12345678;
base[0] = *val;
pr_info("Value in memory before free: %x\n", base[0]);
free_page(p);
pr_info("Attempting to read from freed memory\n");
saw = base[0];
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
BUG();
}
pr_info("Buddy page was not poisoned\n");
kfree(val);
break;
}
case CT_SOFTLOCKUP:
preempt_disable();
for (;;)
cpu_relax();
break;
case CT_HARDLOCKUP:
local_irq_disable();
for (;;)
cpu_relax();
break;
case CT_SPINLOCKUP:
/* Must be called twice to trigger. */
spin_lock(&lock_me_up);
/* Let sparse know we intended to exit holding the lock. */
__release(&lock_me_up);
break;
case CT_HUNG_TASK:
set_current_state(TASK_UNINTERRUPTIBLE);
schedule();
break;
case CT_EXEC_DATA:
execute_location(data_area, true);
break;
case CT_EXEC_STACK: {
u8 stack_area[EXEC_SIZE];
execute_location(stack_area, true);
break;
}
case CT_EXEC_KMALLOC: {
u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
execute_location(kmalloc_area, true);
kfree(kmalloc_area);
break;
}
case CT_EXEC_VMALLOC: {
u32 *vmalloc_area = vmalloc(EXEC_SIZE);
execute_location(vmalloc_area, true);
vfree(vmalloc_area);
break;
}
case CT_EXEC_RODATA:
execute_location(lkdtm_rodata_do_nothing, false);
break;
case CT_EXEC_USERSPACE: {
unsigned long user_addr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
execute_user_location((void *)user_addr);
vm_munmap(user_addr, PAGE_SIZE);
break;
}
case CT_ACCESS_USERSPACE: {
unsigned long user_addr, tmp = 0;
unsigned long *ptr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
pr_warn("copy_to_user failed\n");
vm_munmap(user_addr, PAGE_SIZE);
return;
}
ptr = (unsigned long *)user_addr;
pr_info("attempting bad read at %p\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
pr_info("attempting bad write at %p\n", ptr);
*ptr = tmp;
vm_munmap(user_addr, PAGE_SIZE);
break;
}
case CT_WRITE_RO: {
/* Explicitly cast away "const" for the test. */
unsigned long *ptr = (unsigned long *)&rodata;
pr_info("attempting bad rodata write at %p\n", ptr);
*ptr ^= 0xabcd1234;
break;
}
case CT_WRITE_RO_AFTER_INIT: {
unsigned long *ptr = &ro_after_init;
/*
* Verify we were written to during init. Since an Oops
* is considered a "success", a failure is to just skip the
* real test.
*/
if ((*ptr & 0xAA) != 0xAA) {
pr_info("%p was NOT written during init!?\n", ptr);
break;
}
pr_info("attempting bad ro_after_init write at %p\n", ptr);
*ptr ^= 0xabcd1234;
break;
}
case CT_WRITE_KERN: {
size_t size;
unsigned char *ptr;
size = (unsigned long)do_overwritten -
(unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
pr_info("attempting bad %zu byte write at %p\n", size, ptr);
memcpy(ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr,
(unsigned long)(ptr + size));
do_overwritten();
break;
}
case CT_ATOMIC_UNDERFLOW: {
atomic_t under = ATOMIC_INIT(INT_MIN);
pr_info("attempting good atomic increment\n");
atomic_inc(&under);
atomic_dec(&under);
pr_info("attempting bad atomic underflow\n");
atomic_dec(&under);
break;
}
case CT_ATOMIC_OVERFLOW: {
atomic_t over = ATOMIC_INIT(INT_MAX);
pr_info("attempting good atomic decrement\n");
atomic_dec(&over);
atomic_inc(&over);
pr_info("attempting bad atomic overflow\n");
atomic_inc(&over);
return;
}
case CT_USERCOPY_HEAP_SIZE_TO:
do_usercopy_heap_size(true);
break;
case CT_USERCOPY_HEAP_SIZE_FROM:
do_usercopy_heap_size(false);
break;
case CT_USERCOPY_HEAP_FLAG_TO:
do_usercopy_heap_flag(true);
break;
case CT_USERCOPY_HEAP_FLAG_FROM:
do_usercopy_heap_flag(false);
break;
case CT_USERCOPY_STACK_FRAME_TO:
do_usercopy_stack(true, true);
break;
case CT_USERCOPY_STACK_FRAME_FROM:
do_usercopy_stack(false, true);
break;
case CT_USERCOPY_STACK_BEYOND:
do_usercopy_stack(true, false);
break;
case CT_NONE:
default:
break;
}
BUG_ON(!crashtype || !crashtype->func);
crashtype->func();
}
/* Called by jprobe entry points. */
static void lkdtm_handler(void)
{
unsigned long flags;
bool do_it = false;
spin_lock_irqsave(&count_lock, flags);
count--;
BUG_ON(!lkdtm_crashpoint || !lkdtm_crashtype);
spin_lock_irqsave(&crash_count_lock, flags);
crash_count--;
pr_info("Crash point %s of type %s hit, trigger in %d rounds\n",
cp_name_to_str(cpoint), cp_type_to_str(cptype), count);
lkdtm_crashpoint->name, lkdtm_crashtype->name, crash_count);
if (count == 0) {
if (crash_count == 0) {
do_it = true;
count = cpoint_count;
crash_count = cpoint_count;
}
spin_unlock_irqrestore(&count_lock, flags);
spin_unlock_irqrestore(&crash_count_lock, flags);
if (do_it)
lkdtm_do_action(cptype);
lkdtm_do_action(lkdtm_crashtype);
}
static int lkdtm_register_cpoint(enum cname which)
static int lkdtm_register_cpoint(struct crashpoint *crashpoint,
struct crashtype *crashtype)
{
int ret;
cpoint = CN_INVALID;
if (lkdtm.entry != NULL)
unregister_jprobe(&lkdtm);
switch (which) {
case CN_DIRECT:
lkdtm_do_action(cptype);
/* If this doesn't have a symbol, just call immediately. */
if (!crashpoint->jprobe.kp.symbol_name) {
lkdtm_do_action(crashtype);
return 0;
case CN_INT_HARDWARE_ENTRY:
lkdtm.kp.symbol_name = "do_IRQ";
lkdtm.entry = (kprobe_opcode_t*) jp_do_irq;
break;
case CN_INT_HW_IRQ_EN:
lkdtm.kp.symbol_name = "handle_IRQ_event";
lkdtm.entry = (kprobe_opcode_t*) jp_handle_irq_event;
break;
case CN_INT_TASKLET_ENTRY:
lkdtm.kp.symbol_name = "tasklet_action";
lkdtm.entry = (kprobe_opcode_t*) jp_tasklet_action;
break;
case CN_FS_DEVRW:
lkdtm.kp.symbol_name = "ll_rw_block";
lkdtm.entry = (kprobe_opcode_t*) jp_ll_rw_block;
break;
case CN_MEM_SWAPOUT:
lkdtm.kp.symbol_name = "shrink_inactive_list";
lkdtm.entry = (kprobe_opcode_t*) jp_shrink_inactive_list;
break;
case CN_TIMERADD:
lkdtm.kp.symbol_name = "hrtimer_start";
lkdtm.entry = (kprobe_opcode_t*) jp_hrtimer_start;
break;
case CN_SCSI_DISPATCH_CMD:
lkdtm.kp.symbol_name = "scsi_dispatch_cmd";
lkdtm.entry = (kprobe_opcode_t*) jp_scsi_dispatch_cmd;
break;
case CN_IDE_CORE_CP:
#ifdef CONFIG_IDE
lkdtm.kp.symbol_name = "generic_ide_ioctl";
lkdtm.entry = (kprobe_opcode_t*) jp_generic_ide_ioctl;
#else
pr_info("Crash point not available\n");
return -EINVAL;
#endif
break;
default:
pr_info("Invalid Crash Point\n");
return -EINVAL;
}
cpoint = which;
if ((ret = register_jprobe(&lkdtm)) < 0) {
pr_info("Couldn't register jprobe\n");
cpoint = CN_INVALID;
if (lkdtm_jprobe != NULL)
unregister_jprobe(lkdtm_jprobe);
lkdtm_crashpoint = crashpoint;
lkdtm_crashtype = crashtype;
lkdtm_jprobe = &crashpoint->jprobe;
ret = register_jprobe(lkdtm_jprobe);
if (ret < 0) {
pr_info("Couldn't register jprobe %s\n",
crashpoint->jprobe.kp.symbol_name);
lkdtm_jprobe = NULL;
lkdtm_crashpoint = NULL;
lkdtm_crashtype = NULL;
}
return ret;
}
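/*
* Load-time sketch (illustrative parameter values): the same pairing can
* be armed via module parameters, e.g.
*
*   # modprobe lkdtm cpoint_name=INT_HARDWARE_ENTRY cpoint_type=BUG cpoint_count=10
*
* which calls BUG() once lkdtm_handler()'s countdown has seen do_IRQ()
* hit ten times.
*/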
static ssize_t do_register_entry(enum cname which, struct file *f,
const char __user *user_buf, size_t count, loff_t *off)
static ssize_t lkdtm_debugfs_entry(struct file *f,
const char __user *user_buf,
size_t count, loff_t *off)
{
struct crashpoint *crashpoint = file_inode(f)->i_private;
struct crashtype *crashtype = NULL;
char *buf;
int err;
@@ -1067,13 +360,13 @@ static ssize_t do_register_entry(enum cname which, struct file *f,
buf[count] = '\0';
strim(buf);
cptype = parse_cp_type(buf, count);
free_page((unsigned long) buf);
crashtype = find_crashtype(buf);
free_page((unsigned long)buf);
if (cptype == CT_NONE)
if (!crashtype)
return -EINVAL;
err = lkdtm_register_cpoint(which);
err = lkdtm_register_cpoint(crashpoint, crashtype);
if (err < 0)
return err;
@@ -1094,8 +387,10 @@ static ssize_t lkdtm_debugfs_read(struct file *f, char __user *user_buf,
return -ENOMEM;
n = snprintf(buf, PAGE_SIZE, "Available crash types:\n");
for (i = 0; i < ARRAY_SIZE(cp_type); i++)
n += snprintf(buf + n, PAGE_SIZE - n, "%s\n", cp_type[i]);
for (i = 0; i < ARRAY_SIZE(crashtypes); i++) {
n += snprintf(buf + n, PAGE_SIZE - n, "%s\n",
crashtypes[i].name);
}
buf[n] = '\0';
out = simple_read_from_buffer(user_buf, count, off,
@@ -1110,60 +405,11 @@ static int lkdtm_debugfs_open(struct inode *inode, struct file *file)
return 0;
}
static ssize_t int_hardware_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
return do_register_entry(CN_INT_HARDWARE_ENTRY, f, buf, count, off);
}
static ssize_t int_hw_irq_en(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
return do_register_entry(CN_INT_HW_IRQ_EN, f, buf, count, off);
}
static ssize_t int_tasklet_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
return do_register_entry(CN_INT_TASKLET_ENTRY, f, buf, count, off);
}
static ssize_t fs_devrw_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
return do_register_entry(CN_FS_DEVRW, f, buf, count, off);
}
static ssize_t mem_swapout_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
return do_register_entry(CN_MEM_SWAPOUT, f, buf, count, off);
}
static ssize_t timeradd_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
return do_register_entry(CN_TIMERADD, f, buf, count, off);
}
static ssize_t scsi_dispatch_cmd_entry(struct file *f,
const char __user *buf, size_t count, loff_t *off)
{
return do_register_entry(CN_SCSI_DISPATCH_CMD, f, buf, count, off);
}
static ssize_t ide_core_cp_entry(struct file *f, const char __user *buf,
size_t count, loff_t *off)
{
return do_register_entry(CN_IDE_CORE_CP, f, buf, count, off);
}
/* Special entry to just crash directly. Available without KPROBEs */
static ssize_t direct_entry(struct file *f, const char __user *user_buf,
size_t count, loff_t *off)
{
enum ctype type;
struct crashtype *crashtype;
char *buf;
if (count >= PAGE_SIZE)
@@ -1182,77 +428,61 @@ static ssize_t direct_entry(struct file *f, const char __user *user_buf,
buf[count] = '\0';
strim(buf);
type = parse_cp_type(buf, count);
crashtype = find_crashtype(buf);
free_page((unsigned long) buf);
if (type == CT_NONE)
if (!crashtype)
return -EINVAL;
pr_info("Performing direct entry %s\n", cp_type_to_str(type));
lkdtm_do_action(type);
pr_info("Performing direct entry %s\n", crashtype->name);
lkdtm_do_action(crashtype);
*off += count;
return count;
}
struct crash_entry {
const char *name;
const struct file_operations fops;
};
static const struct crash_entry crash_entries[] = {
{"DIRECT", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = direct_entry} },
{"INT_HARDWARE_ENTRY", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = int_hardware_entry} },
{"INT_HW_IRQ_EN", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = int_hw_irq_en} },
{"INT_TASKLET_ENTRY", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = int_tasklet_entry} },
{"FS_DEVRW", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = fs_devrw_entry} },
{"MEM_SWAPOUT", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = mem_swapout_entry} },
{"TIMERADD", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = timeradd_entry} },
{"SCSI_DISPATCH_CMD", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = scsi_dispatch_cmd_entry} },
{"IDE_CORE_CP", {.read = lkdtm_debugfs_read,
.llseek = generic_file_llseek,
.open = lkdtm_debugfs_open,
.write = ide_core_cp_entry} },
};
static struct dentry *lkdtm_debugfs_root;
static int __init lkdtm_module_init(void)
{
struct crashpoint *crashpoint = NULL;
struct crashtype *crashtype = NULL;
int ret = -EINVAL;
int n_debugfs_entries = 1; /* Assume only the direct entry */
int i;
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
/* Neither or both of these need to be set */
if ((cpoint_type || cpoint_name) && !(cpoint_type && cpoint_name)) {
pr_err("Need both cpoint_type and cpoint_name or neither\n");
return -EINVAL;
}
if (cpoint_type) {
crashtype = find_crashtype(cpoint_type);
if (!crashtype) {
pr_err("Unknown crashtype '%s'\n", cpoint_type);
return -EINVAL;
}
}
if (cpoint_name) {
for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
if (!strcmp(cpoint_name, crashpoints[i].name))
crashpoint = &crashpoints[i];
}
/* Refuse unknown crashpoints. */
if (!crashpoint) {
pr_err("Invalid crashpoint %s\n", cpoint_name);
return -EINVAL;
}
}
/* Set crash count. */
crash_count = cpoint_count;
/* Prepare cache that lacks SLAB_USERCOPY flag. */
cache_size = clamp_t(int, alloc_size, 1, PAGE_SIZE);
bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
0, NULL);
/* Handle test-specific initialization. */
lkdtm_bugs_init(&recur_count);
lkdtm_perms_init();
lkdtm_usercopy_init();
/* Register debugfs interface */
lkdtm_debugfs_root = debugfs_create_dir("provoke-crash", NULL);
@@ -1261,35 +491,28 @@ static int __init lkdtm_module_init(void)
return -ENODEV;
}
#ifdef CONFIG_KPROBES
n_debugfs_entries = ARRAY_SIZE(crash_entries);
#endif
for (i = 0; i < n_debugfs_entries; i++) {
const struct crash_entry *cur = &crash_entries[i];
/* Install debugfs trigger files. */
for (i = 0; i < ARRAY_SIZE(crashpoints); i++) {
struct crashpoint *cur = &crashpoints[i];
struct dentry *de;
de = debugfs_create_file(cur->name, 0644, lkdtm_debugfs_root,
NULL, &cur->fops);
cur, &cur->fops);
if (de == NULL) {
pr_err("could not create %s\n", cur->name);
pr_err("could not create crashpoint %s\n", cur->name);
goto out_err;
}
}
if (lkdtm_parse_commandline() == -EINVAL) {
pr_info("Invalid command\n");
goto out_err;
}
if (cpoint != CN_INVALID && cptype != CT_NONE) {
ret = lkdtm_register_cpoint(cpoint);
/* Install crashpoint if one was selected. */
if (crashpoint) {
ret = lkdtm_register_cpoint(crashpoint, crashtype);
if (ret < 0) {
pr_info("Invalid crash point %d\n", cpoint);
pr_info("Invalid crashpoint %s\n", crashpoint->name);
goto out_err;
}
pr_info("Crash point %s of type %s registered\n",
cpoint_name, cpoint_type);
crashpoint->name, cpoint_type);
} else {
pr_info("No crash points registered, enable through debugfs\n");
}
@@ -1305,9 +528,10 @@ static void __exit lkdtm_module_exit(void)
{
debugfs_remove_recursive(lkdtm_debugfs_root);
kmem_cache_destroy(bad_cache);
/* Handle test-specific clean-up. */
lkdtm_usercopy_exit();
unregister_jprobe(&lkdtm);
unregister_jprobe(lkdtm_jprobe);
pr_info("Crash point unregistered\n");
}
@@ -1315,4 +539,4 @@ module_init(lkdtm_module_init);
module_exit(lkdtm_module_exit);
MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Kprobe module for testing crash dumps");
MODULE_DESCRIPTION("Kernel crash testing module");
/*
* This is for all the tests relating directly to heap memory, including
* page allocation and slab allocations.
*/
#define pr_fmt(fmt) "lkdtm: " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include "lkdtm.h"
/*
* This tries to stay within the next largest power-of-2 kmalloc cache
* to avoid actually overwriting anything important if it's not detected
* correctly.
*/
void lkdtm_OVERWRITE_ALLOCATION(void)
{
size_t len = 1020;
u32 *data = kmalloc(len, GFP_KERNEL);
data[1024 / sizeof(u32)] = 0x12345678;
kfree(data);
}
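/*
* Worked note: kmalloc(1020) is served from the 1024-byte kmalloc cache,
* so the store to data[1024 / sizeof(u32)] (byte offset 1024) lands just
* past the end of the object, where slab debugging such as redzoning can
* catch it while keeping the damage small if it goes undetected.
*/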
void lkdtm_WRITE_AFTER_FREE(void)
{
int *base, *again;
size_t len = 1024;
/*
* The slub allocator uses the first word to store the free
* pointer in some configurations. Use the middle of the
* allocation to avoid running into the freelist
*/
size_t offset = (len / sizeof(*base)) / 2;
base = kmalloc(len, GFP_KERNEL);
pr_info("Allocated memory %p-%p\n", base, &base[offset * 2]);
pr_info("Attempting bad write to freed memory at %p\n",
&base[offset]);
kfree(base);
base[offset] = 0x0abcdef0;
/* Attempt to notice the overwrite. */
again = kmalloc(len, GFP_KERNEL);
kfree(again);
if (again != base)
pr_info("Hmm, didn't get the same memory range.\n");
}
void lkdtm_READ_AFTER_FREE(void)
{
int *base, *val, saw;
size_t len = 1024;
/*
* The slub allocator uses the first word to store the free
* pointer in some configurations. Use the middle of the
* allocation to avoid running into the freelist
*/
size_t offset = (len / sizeof(*base)) / 2;
base = kmalloc(len, GFP_KERNEL);
if (!base) {
pr_info("Unable to allocate base memory.\n");
return;
}
val = kmalloc(len, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate val memory.\n");
kfree(base);
return;
}
*val = 0x12345678;
base[offset] = *val;
pr_info("Value in memory before free: %x\n", base[offset]);
kfree(base);
pr_info("Attempting bad read from freed memory\n");
saw = base[offset];
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
BUG();
}
pr_info("Memory was not poisoned\n");
kfree(val);
}
void lkdtm_WRITE_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
if (!p) {
pr_info("Unable to allocate free page\n");
return;
}
pr_info("Writing to the buddy page before free\n");
memset((void *)p, 0x3, PAGE_SIZE);
free_page(p);
schedule();
pr_info("Attempting bad write to the buddy page after free\n");
memset((void *)p, 0x78, PAGE_SIZE);
/* Attempt to notice the overwrite. */
p = __get_free_page(GFP_KERNEL);
free_page(p);
schedule();
}
void lkdtm_READ_BUDDY_AFTER_FREE(void)
{
unsigned long p = __get_free_page(GFP_KERNEL);
int saw, *val;
int *base;
if (!p) {
pr_info("Unable to allocate free page\n");
return;
}
val = kmalloc(1024, GFP_KERNEL);
if (!val) {
pr_info("Unable to allocate val memory.\n");
free_page(p);
return;
}
base = (int *)p;
*val = 0x12345678;
base[0] = *val;
pr_info("Value in memory before free: %x\n", base[0]);
free_page(p);
pr_info("Attempting to read from freed memory\n");
saw = base[0];
if (saw != *val) {
/* Good! Poisoning happened, so declare a win. */
pr_info("Memory correctly poisoned (%x)\n", saw);
BUG();
}
pr_info("Buddy page was not poisoned\n");
kfree(val);
}
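/*
* Background note (depends on kernel config, stated as an assumption):
* these tests only reach the BUG() "win" path when the allocator actually
* poisons freed memory, e.g. SLUB poisoning (slub_debug=P) for the slab
* cases or CONFIG_PAGE_POISONING for the buddy-page cases; otherwise they
* report that the memory was not poisoned.
*/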
/*
* This is for all the tests related to validating kernel memory
* permissions: non-executable regions, non-writable regions, and
* even non-readable regions.
*/
#define pr_fmt(fmt) "lkdtm: " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include "lkdtm.h"
/* Whether or not to fill the target memory area with do_nothing(). */
#define CODE_WRITE true
#define CODE_AS_IS false
/* How many bytes to copy to be sure we've copied enough of do_nothing(). */
#define EXEC_SIZE 64
/* This is non-const, so it will end up in the .data section. */
static u8 data_area[EXEC_SIZE];
/* This is const, so it will end up in the .rodata section. */
static const unsigned long rodata = 0xAA55AA55;
/* This is marked __ro_after_init, so it should ultimately be .rodata. */
static unsigned long ro_after_init __ro_after_init = 0x55AA5500;
/*
* This just returns to the caller. It is designed to be copied into
* non-executable memory regions.
*/
static void do_nothing(void)
{
return;
}
/* Must immediately follow do_nothing for size calculations to work out. */
static void do_overwritten(void)
{
pr_info("do_overwritten wasn't overwritten!\n");
return;
}
static noinline void execute_location(void *dst, bool write)
{
void (*func)(void) = dst;
pr_info("attempting ok execution at %p\n", do_nothing);
do_nothing();
if (write == CODE_WRITE) {
memcpy(dst, do_nothing, EXEC_SIZE);
flush_icache_range((unsigned long)dst,
(unsigned long)dst + EXEC_SIZE);
}
pr_info("attempting bad execution at %p\n", func);
func();
}
static void execute_user_location(void *dst)
{
/* Intentionally crossing kernel/user memory boundary. */
void (*func)(void) = dst;
pr_info("attempting ok execution at %p\n", do_nothing);
do_nothing();
if (copy_to_user((void __user *)dst, do_nothing, EXEC_SIZE))
return;
flush_icache_range((unsigned long)dst, (unsigned long)dst + EXEC_SIZE);
pr_info("attempting bad execution at %p\n", func);
func();
}
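/*
* Background note (hardware behavior, not specific to this commit): the
* "bad execution" of a userspace page is expected to fault on systems
* that block kernel execution of user memory, such as x86 SMEP or arm64
* PXN; without such protection the copied do_nothing() simply runs.
*/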
void lkdtm_WRITE_RO(void)
{
/* Explicitly cast away "const" for the test. */
unsigned long *ptr = (unsigned long *)&rodata;
pr_info("attempting bad rodata write at %p\n", ptr);
*ptr ^= 0xabcd1234;
}
void lkdtm_WRITE_RO_AFTER_INIT(void)
{
unsigned long *ptr = &ro_after_init;
/*
* Verify we were written to during init. Since an Oops
* is considered a "success", a failure is to just skip the
* real test.
*/
if ((*ptr & 0xAA) != 0xAA) {
pr_info("%p was NOT written during init!?\n", ptr);
return;
}
pr_info("attempting bad ro_after_init write at %p\n", ptr);
*ptr ^= 0xabcd1234;
}
void lkdtm_WRITE_KERN(void)
{
size_t size;
unsigned char *ptr;
size = (unsigned long)do_overwritten - (unsigned long)do_nothing;
ptr = (unsigned char *)do_overwritten;
pr_info("attempting bad %zu byte write at %p\n", size, ptr);
memcpy(ptr, (unsigned char *)do_nothing, size);
flush_icache_range((unsigned long)ptr, (unsigned long)(ptr + size));
do_overwritten();
}
void lkdtm_EXEC_DATA(void)
{
execute_location(data_area, CODE_WRITE);
}
void lkdtm_EXEC_STACK(void)
{
u8 stack_area[EXEC_SIZE];
execute_location(stack_area, CODE_WRITE);
}
void lkdtm_EXEC_KMALLOC(void)
{
u32 *kmalloc_area = kmalloc(EXEC_SIZE, GFP_KERNEL);
execute_location(kmalloc_area, CODE_WRITE);
kfree(kmalloc_area);
}
void lkdtm_EXEC_VMALLOC(void)
{
u32 *vmalloc_area = vmalloc(EXEC_SIZE);
execute_location(vmalloc_area, CODE_WRITE);
vfree(vmalloc_area);
}
void lkdtm_EXEC_RODATA(void)
{
execute_location(lkdtm_rodata_do_nothing, CODE_AS_IS);
}
void lkdtm_EXEC_USERSPACE(void)
{
unsigned long user_addr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
execute_user_location((void *)user_addr);
vm_munmap(user_addr, PAGE_SIZE);
}
void lkdtm_ACCESS_USERSPACE(void)
{
unsigned long user_addr, tmp = 0;
unsigned long *ptr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
if (copy_to_user((void __user *)user_addr, &tmp, sizeof(tmp))) {
pr_warn("copy_to_user failed\n");
vm_munmap(user_addr, PAGE_SIZE);
return;
}
ptr = (unsigned long *)user_addr;
pr_info("attempting bad read at %p\n", ptr);
tmp = *ptr;
tmp += 0xc0dec0de;
pr_info("attempting bad write at %p\n", ptr);
*ptr = tmp;
vm_munmap(user_addr, PAGE_SIZE);
}
void __init lkdtm_perms_init(void)
{
/* Make sure we can write to __ro_after_init values during __init */
ro_after_init |= 0xAA;
}
/*
* This is for all the tests related to copy_to_user() and copy_from_user()
* hardening.
*/
#define pr_fmt(fmt) "lkdtm: " fmt
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mman.h>
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
static size_t cache_size = 1024;
static struct kmem_cache *bad_cache;
static const unsigned char test_text[] = "This is a test.\n";
/*
* Instead of adding -Wno-return-local-addr, just pass the stack address
* through a function to obfuscate it from the compiler.
*/
static noinline unsigned char *trick_compiler(unsigned char *stack)
{
return stack + 0;
}
static noinline unsigned char *do_usercopy_stack_callee(int value)
{
unsigned char buf[32];
int i;
/* Exercise stack to avoid everything living in registers. */
for (i = 0; i < sizeof(buf); i++) {
buf[i] = value & 0xff;
}
return trick_compiler(buf);
}
static noinline void do_usercopy_stack(bool to_user, bool bad_frame)
{
unsigned long user_addr;
unsigned char good_stack[32];
unsigned char *bad_stack;
int i;
/* Exercise stack to avoid everything living in registers. */
for (i = 0; i < sizeof(good_stack); i++)
good_stack[i] = test_text[i % sizeof(test_text)];
/* This is a pointer to outside our current stack frame. */
if (bad_frame) {
bad_stack = do_usercopy_stack_callee((uintptr_t)&bad_stack);
} else {
/* Put start address just inside stack. */
bad_stack = task_stack_page(current) + THREAD_SIZE;
bad_stack -= sizeof(unsigned long);
}
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
if (to_user) {
pr_info("attempting good copy_to_user of local stack\n");
if (copy_to_user((void __user *)user_addr, good_stack,
sizeof(good_stack))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of distant stack\n");
if (copy_to_user((void __user *)user_addr, bad_stack,
sizeof(good_stack))) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
/*
* There isn't a safe way to not be protected by usercopy
* if we're going to write to another thread's stack.
*/
if (!bad_frame)
goto free_user;
pr_info("attempting good copy_from_user of local stack\n");
if (copy_from_user(good_stack, (void __user *)user_addr,
sizeof(good_stack))) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of distant stack\n");
if (copy_from_user(bad_stack, (void __user *)user_addr,
sizeof(good_stack))) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
static void do_usercopy_heap_size(bool to_user)
{
unsigned long user_addr;
unsigned char *one, *two;
const size_t size = 1024;
one = kmalloc(size, GFP_KERNEL);
two = kmalloc(size, GFP_KERNEL);
if (!one || !two) {
pr_warn("Failed to allocate kernel memory\n");
goto free_kernel;
}
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_kernel;
}
memset(one, 'A', size);
memset(two, 'B', size);
if (to_user) {
pr_info("attempting good copy_to_user of correct size\n");
if (copy_to_user((void __user *)user_addr, one, size)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user of too large size\n");
if (copy_to_user((void __user *)user_addr, one, 2 * size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user of correct size\n");
if (copy_from_user(one, (void __user *)user_addr, size)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user of too large size\n");
if (copy_from_user(one, (void __user *)user_addr, 2 * size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
free_kernel:
kfree(one);
kfree(two);
}
static void do_usercopy_heap_flag(bool to_user)
{
unsigned long user_addr;
unsigned char *good_buf = NULL;
unsigned char *bad_buf = NULL;
/* Make sure cache was prepared. */
if (!bad_cache) {
pr_warn("Failed to allocate kernel cache\n");
return;
}
/*
* Allocate one buffer from each cache (kmalloc will have the
* SLAB_USERCOPY flag already, but "bad_cache" won't).
*/
good_buf = kmalloc(cache_size, GFP_KERNEL);
bad_buf = kmem_cache_alloc(bad_cache, GFP_KERNEL);
if (!good_buf || !bad_buf) {
pr_warn("Failed to allocate buffers from caches\n");
goto free_alloc;
}
/* Allocate user memory we'll poke at. */
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
goto free_alloc;
}
memset(good_buf, 'A', cache_size);
memset(bad_buf, 'B', cache_size);
if (to_user) {
pr_info("attempting good copy_to_user with SLAB_USERCOPY\n");
if (copy_to_user((void __user *)user_addr, good_buf,
cache_size)) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user w/o SLAB_USERCOPY\n");
if (copy_to_user((void __user *)user_addr, bad_buf,
cache_size)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
} else {
pr_info("attempting good copy_from_user with SLAB_USERCOPY\n");
if (copy_from_user(good_buf, (void __user *)user_addr,
cache_size)) {
pr_warn("copy_from_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_from_user w/o SLAB_USERCOPY\n");
if (copy_from_user(bad_buf, (void __user *)user_addr,
cache_size)) {
pr_warn("copy_from_user failed, but lacked Oops\n");
goto free_user;
}
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
free_alloc:
if (bad_buf)
kmem_cache_free(bad_cache, bad_buf);
kfree(good_buf);
}
/* Callable tests. */
void lkdtm_USERCOPY_HEAP_SIZE_TO(void)
{
do_usercopy_heap_size(true);
}
void lkdtm_USERCOPY_HEAP_SIZE_FROM(void)
{
do_usercopy_heap_size(false);
}
void lkdtm_USERCOPY_HEAP_FLAG_TO(void)
{
do_usercopy_heap_flag(true);
}
void lkdtm_USERCOPY_HEAP_FLAG_FROM(void)
{
do_usercopy_heap_flag(false);
}
void lkdtm_USERCOPY_STACK_FRAME_TO(void)
{
do_usercopy_stack(true, true);
}
void lkdtm_USERCOPY_STACK_FRAME_FROM(void)
{
do_usercopy_stack(false, true);
}
void lkdtm_USERCOPY_STACK_BEYOND(void)
{
do_usercopy_stack(true, false);
}
void lkdtm_USERCOPY_KERNEL(void)
{
unsigned long user_addr;
user_addr = vm_mmap(NULL, 0, PAGE_SIZE,
PROT_READ | PROT_WRITE | PROT_EXEC,
MAP_ANONYMOUS | MAP_PRIVATE, 0);
if (user_addr >= TASK_SIZE) {
pr_warn("Failed to allocate user memory\n");
return;
}
pr_info("attempting good copy_to_user from kernel rodata\n");
if (copy_to_user((void __user *)user_addr, test_text,
sizeof(test_text))) {
pr_warn("copy_to_user failed unexpectedly?!\n");
goto free_user;
}
pr_info("attempting bad copy_to_user from kernel text\n");
if (copy_to_user((void __user *)user_addr, vm_mmap, PAGE_SIZE)) {
pr_warn("copy_to_user failed, but lacked Oops\n");
goto free_user;
}
free_user:
vm_munmap(user_addr, PAGE_SIZE);
}
void __init lkdtm_usercopy_init(void)
{
/* Prepare cache that lacks SLAB_USERCOPY flag. */
bad_cache = kmem_cache_create("lkdtm-no-usercopy", cache_size, 0,
0, NULL);
}
void __exit lkdtm_usercopy_exit(void)
{
kmem_cache_destroy(bad_cache);
}