Commit 6739cc12 authored by Kees Cook, committed by Greg Kroah-Hartman

mm: Tighten x86 /dev/mem with zeroing reads

commit a4866aa8 upstream.

Under CONFIG_STRICT_DEVMEM, reading System RAM through /dev/mem is
disallowed. However, on x86, the first 1MB was always allowed for BIOS
and similar things, regardless of it actually being System RAM. It was
possible for heap to end up getting allocated in low 1MB RAM, and then
read by things like x86info or dd, which would trip hardened usercopy:

usercopy: kernel memory exposure attempt detected from ffff880000090000 (dma-kmalloc-256) (4096 bytes)

This changes the x86 exception for the low 1MB by reading back zeros for
System RAM areas instead of blindly allowing them. More work is needed to
extend this to mmap, but currently mmap doesn't go through usercopy, so
hardened usercopy won't Oops the kernel.
Reported-by: Tommi Rantala <tommi.t.rantala@nokia.com>
Tested-by: Tommi Rantala <tommi.t.rantala@nokia.com>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Brad Spengler <spender@grsecurity.net>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent ba027813
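The scenario described in the commit message can be exercised from userspace with a short test program. The sketch below is illustrative only and not part of the patch: it assumes root, a kernel built with CONFIG_STRICT_DEVMEM, and uses 0x90000 purely as an example offset inside the low 1MB (it mirrors the physical address behind the usercopy warning quoted above).

/*
 * Userspace sketch (not part of the patch): read one page from the low
 * 1MB of /dev/mem. With this change applied, pages backed by System RAM
 * read back as zeros instead of exposing kernel heap contents.
 */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	unsigned char page[4096];
	int fd = open("/dev/mem", O_RDONLY);	/* requires root */

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	/* 0x90000 is an example offset; any RAM-backed low-1MB page behaves the same. */
	if (pread(fd, page, sizeof(page), 0x90000) != sizeof(page)) {
		perror("pread");
		close(fd);
		return 1;
	}
	/* With the patch, a System RAM page here is shown as all zeros. */
	printf("first byte: 0x%02x\n", page[0]);
	close(fd);
	return 0;
}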
arch/x86/mm/init.c
@@ -628,21 +628,40 @@ void __init init_mem_mapping(void)
  * devmem_is_allowed() checks to see if /dev/mem access to a certain address
  * is valid. The argument is a physical page number.
  *
- *
- * On x86, access has to be given to the first megabyte of ram because that area
- * contains BIOS code and data regions used by X and dosemu and similar apps.
- * Access has to be given to non-kernel-ram areas as well, these contain the PCI
- * mmio resources as well as potential bios/acpi data regions.
+ * On x86, access has to be given to the first megabyte of RAM because that
+ * area traditionally contains BIOS code and data regions used by X, dosemu,
+ * and similar apps. Since they map the entire memory range, the whole range
+ * must be allowed (for mapping), but any areas that would otherwise be
+ * disallowed are flagged as being "zero filled" instead of rejected.
+ * Access has to be given to non-kernel-ram areas as well, these contain the
+ * PCI mmio resources as well as potential bios/acpi data regions.
  */
 int devmem_is_allowed(unsigned long pagenr)
 {
-	if (pagenr < 256)
-		return 1;
-	if (iomem_is_exclusive(pagenr << PAGE_SHIFT))
+	if (page_is_ram(pagenr)) {
+		/*
+		 * For disallowed memory regions in the low 1MB range,
+		 * request that the page be shown as all zeros.
+		 */
+		if (pagenr < 256)
+			return 2;
+
+		return 0;
+	}
+
+	/*
+	 * This must follow RAM test, since System RAM is considered a
+	 * restricted resource under CONFIG_STRICT_IOMEM.
+	 */
+	if (iomem_is_exclusive(pagenr << PAGE_SHIFT)) {
+		/* Low 1MB bypasses iomem restrictions. */
+		if (pagenr < 256)
+			return 1;
+
 		return 0;
-	if (!page_is_ram(pagenr))
-		return 1;
-	return 0;
+	}
+
+	return 1;
 }
 
 void free_init_pages(char *what, unsigned long begin, unsigned long end)
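For reference, devmem_is_allowed() now reports one of three values, which the new page_is_allowed() helper in drivers/char/mem.c passes straight through to the read and write paths shown below. The enum labels here are purely illustrative; the kernel itself uses the bare integers.

/* Illustrative labels only; the kernel returns the bare values 0/1/2. */
enum devmem_access {
	DEVMEM_DENY      = 0,	/* access rejected with -EPERM                  */
	DEVMEM_ALLOW     = 1,	/* access passed through to the underlying page */
	DEVMEM_ZERO_FILL = 2,	/* reads see zeros, writes are silently dropped */
};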
drivers/char/mem.c
@@ -59,6 +59,10 @@ static inline int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
 #endif
 
 #ifdef CONFIG_STRICT_DEVMEM
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return devmem_is_allowed(pfn);
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	u64 from = ((u64)pfn) << PAGE_SHIFT;
@@ -78,6 +82,10 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 	return 1;
 }
 #else
+static inline int page_is_allowed(unsigned long pfn)
+{
+	return 1;
+}
 static inline int range_is_allowed(unsigned long pfn, unsigned long size)
 {
 	return 1;
@@ -125,23 +133,31 @@ static ssize_t read_mem(struct file *file, char __user *buf,
 
 	while (count > 0) {
 		unsigned long remaining;
+		int allowed;
 
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, count))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
+		if (allowed == 2) {
+			/* Show zeros for restricted memory. */
+			remaining = clear_user(buf, sz);
+		} else {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr)
+				return -EFAULT;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr)
-			return -EFAULT;
+			remaining = copy_to_user(buf, ptr, sz);
 
-		remaining = copy_to_user(buf, ptr, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+		}
 
-		unxlate_dev_mem_ptr(p, ptr);
 		if (remaining)
 			return -EFAULT;
@@ -184,30 +200,36 @@ static ssize_t write_mem(struct file *file, const char __user *buf,
 #endif
 
 	while (count > 0) {
+		int allowed;
+
 		sz = size_inside_page(p, count);
 
-		if (!range_is_allowed(p >> PAGE_SHIFT, sz))
+		allowed = page_is_allowed(p >> PAGE_SHIFT);
+		if (!allowed)
 			return -EPERM;
 
-		/*
-		 * On ia64 if a page has been mapped somewhere as uncached, then
-		 * it must also be accessed uncached by the kernel or data
-		 * corruption may occur.
-		 */
-		ptr = xlate_dev_mem_ptr(p);
-		if (!ptr) {
-			if (written)
-				break;
-			return -EFAULT;
-		}
+		/* Skip actual writing when a page is marked as restricted. */
+		if (allowed == 1) {
+			/*
+			 * On ia64 if a page has been mapped somewhere as
+			 * uncached, then it must also be accessed uncached
+			 * by the kernel or data corruption may occur.
+			 */
+			ptr = xlate_dev_mem_ptr(p);
+			if (!ptr) {
+				if (written)
+					break;
+				return -EFAULT;
+			}
 
-		copied = copy_from_user(ptr, buf, sz);
-		unxlate_dev_mem_ptr(p, ptr);
-		if (copied) {
-			written += sz - copied;
-			if (written)
-				break;
-			return -EFAULT;
+			copied = copy_from_user(ptr, buf, sz);
+			unxlate_dev_mem_ptr(p, ptr);
+			if (copied) {
+				written += sz - copied;
+				if (written)
+					break;
+				return -EFAULT;
+			}
 		}
 
 		buf += sz;
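A companion userspace sketch of the write path, again illustrative and not part of the patch: because write_mem() consumes the buffer without touching memory when a page is flagged as restricted, the write appears to succeed, but reading the page back still yields zeros. The 0x90000 offset is only an example.

/*
 * Userspace sketch (not part of the patch): write to a low-1MB page of
 * /dev/mem. When devmem_is_allowed() reports 2 for the page, write_mem()
 * reports success but silently discards the data.
 */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	unsigned char buf[16];
	int fd = open("/dev/mem", O_RDWR);	/* requires root */

	if (fd < 0) {
		perror("open /dev/mem");
		return 1;
	}
	memset(buf, 0xaa, sizeof(buf));
	/* Example offset; a RAM-backed low-1MB page "accepts" the write but stays zero-filled. */
	if (pwrite(fd, buf, sizeof(buf), 0x90000) != sizeof(buf))
		perror("pwrite");
	close(fd);
	return 0;
}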