Commit 9ccaf77c authored by Kees Cook, committed by Ingo Molnar

x86/mm: Always enable CONFIG_DEBUG_RODATA and remove the Kconfig option

This removes the CONFIG_DEBUG_RODATA option and makes it always enabled.

This simplifies the code and also makes it clearer that read-only mapped
memory is just as fundamental a security feature in kernel-space as it is
in user-space.
Suggested-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Andy Lutomirski <luto@amacapital.net>
Cc: Arnd Bergmann <arnd@arndb.de>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Brian Gerst <brgerst@gmail.com>
Cc: David Brown <david.brown@linaro.org>
Cc: Denys Vlasenko <dvlasenk@redhat.com>
Cc: Emese Revfy <re.emese@gmail.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Mathias Krause <minipli@googlemail.com>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: PaX Team <pageexec@freemail.hu>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: kernel-hardening@lists.openwall.com
Cc: linux-arch <linux-arch@vger.kernel.org>
Link: http://lkml.kernel.org/r/1455748879-21872-4-git-send-email-keescook@chromium.org
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent d2aa1aca
...@@ -303,6 +303,9 @@ config ARCH_SUPPORTS_UPROBES ...@@ -303,6 +303,9 @@ config ARCH_SUPPORTS_UPROBES
config FIX_EARLYCON_MEM config FIX_EARLYCON_MEM
def_bool y def_bool y
config DEBUG_RODATA
def_bool y
config PGTABLE_LEVELS config PGTABLE_LEVELS
int int
default 4 if X86_64 default 4 if X86_64
......
...@@ -74,28 +74,16 @@ config EFI_PGT_DUMP ...@@ -74,28 +74,16 @@ config EFI_PGT_DUMP
issues with the mapping of the EFI runtime regions into that issues with the mapping of the EFI runtime regions into that
table. table.
config DEBUG_RODATA
bool "Write protect kernel read-only data structures"
default y
depends on DEBUG_KERNEL
---help---
Mark the kernel read-only data as write-protected in the pagetables,
in order to catch accidental (and incorrect) writes to such const
data. This is recommended so that we can catch kernel bugs sooner.
If in doubt, say "Y".
config DEBUG_RODATA_TEST config DEBUG_RODATA_TEST
bool "Testcase for the DEBUG_RODATA feature" bool "Testcase for the marking rodata read-only"
depends on DEBUG_RODATA
default y default y
---help--- ---help---
This option enables a testcase for the DEBUG_RODATA This option enables a testcase for the setting rodata read-only
feature as well as for the change_page_attr() infrastructure. as well as for the change_page_attr() infrastructure.
If in doubt, say "N" If in doubt, say "N"
config DEBUG_WX config DEBUG_WX
bool "Warn on W+X mappings at boot" bool "Warn on W+X mappings at boot"
depends on DEBUG_RODATA
select X86_PTDUMP_CORE select X86_PTDUMP_CORE
---help--- ---help---
Generate a warning if any W+X mappings are found at boot. Generate a warning if any W+X mappings are found at boot.
......
...@@ -91,15 +91,10 @@ void clflush_cache_range(void *addr, unsigned int size); ...@@ -91,15 +91,10 @@ void clflush_cache_range(void *addr, unsigned int size);
#define mmio_flush_range(addr, size) clflush_cache_range(addr, size) #define mmio_flush_range(addr, size) clflush_cache_range(addr, size)
#ifdef CONFIG_DEBUG_RODATA
extern const int rodata_test_data; extern const int rodata_test_data;
extern int kernel_set_to_readonly; extern int kernel_set_to_readonly;
void set_kernel_text_rw(void); void set_kernel_text_rw(void);
void set_kernel_text_ro(void); void set_kernel_text_ro(void);
#else
static inline void set_kernel_text_rw(void) { }
static inline void set_kernel_text_ro(void) { }
#endif
#ifdef CONFIG_DEBUG_RODATA_TEST #ifdef CONFIG_DEBUG_RODATA_TEST
int rodata_test(void); int rodata_test(void);
......
...@@ -17,15 +17,8 @@ static inline bool kvm_check_and_clear_guest_paused(void) ...@@ -17,15 +17,8 @@ static inline bool kvm_check_and_clear_guest_paused(void)
} }
#endif /* CONFIG_KVM_GUEST */ #endif /* CONFIG_KVM_GUEST */
#ifdef CONFIG_DEBUG_RODATA
#define KVM_HYPERCALL \ #define KVM_HYPERCALL \
ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL) ALTERNATIVE(".byte 0x0f,0x01,0xc1", ".byte 0x0f,0x01,0xd9", X86_FEATURE_VMMCALL)
#else
/* On AMD processors, vmcall will generate a trap that we will
* then rewrite to the appropriate instruction.
*/
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"
#endif
/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall /* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
* instruction. The hypervisor may replace it with something else but only the * instruction. The hypervisor may replace it with something else but only the
......
...@@ -7,7 +7,7 @@ ...@@ -7,7 +7,7 @@
extern char __brk_base[], __brk_limit[]; extern char __brk_base[], __brk_limit[];
extern struct exception_table_entry __stop___ex_table[]; extern struct exception_table_entry __stop___ex_table[];
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) #if defined(CONFIG_X86_64)
extern char __end_rodata_hpage_align[]; extern char __end_rodata_hpage_align[];
#endif #endif
......
...@@ -81,9 +81,9 @@ within(unsigned long addr, unsigned long start, unsigned long end) ...@@ -81,9 +81,9 @@ within(unsigned long addr, unsigned long start, unsigned long end)
static unsigned long text_ip_addr(unsigned long ip) static unsigned long text_ip_addr(unsigned long ip)
{ {
/* /*
* On x86_64, kernel text mappings are mapped read-only with * On x86_64, kernel text mappings are mapped read-only, so we use
* CONFIG_DEBUG_RODATA. So we use the kernel identity mapping instead * the kernel identity mapping instead of the kernel text mapping
* of the kernel text mapping to modify the kernel text. * to modify the kernel text.
* *
* For 32bit kernels, these mappings are same and we can use * For 32bit kernels, these mappings are same and we can use
* kernel identity mapping to modify code. * kernel identity mapping to modify code.
......
...@@ -750,9 +750,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip) ...@@ -750,9 +750,7 @@ void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
{ {
int err; int err;
#ifdef CONFIG_DEBUG_RODATA
char opc[BREAK_INSTR_SIZE]; char opc[BREAK_INSTR_SIZE];
#endif /* CONFIG_DEBUG_RODATA */
bpt->type = BP_BREAKPOINT; bpt->type = BP_BREAKPOINT;
err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr, err = probe_kernel_read(bpt->saved_instr, (char *)bpt->bpt_addr,
...@@ -761,7 +759,6 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) ...@@ -761,7 +759,6 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
return err; return err;
err = probe_kernel_write((char *)bpt->bpt_addr, err = probe_kernel_write((char *)bpt->bpt_addr,
arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE); arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
#ifdef CONFIG_DEBUG_RODATA
if (!err) if (!err)
return err; return err;
/* /*
...@@ -778,13 +775,12 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt) ...@@ -778,13 +775,12 @@ int kgdb_arch_set_breakpoint(struct kgdb_bkpt *bpt)
if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE)) if (memcmp(opc, arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE))
return -EINVAL; return -EINVAL;
bpt->type = BP_POKE_BREAKPOINT; bpt->type = BP_POKE_BREAKPOINT;
#endif /* CONFIG_DEBUG_RODATA */
return err; return err;
} }
int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
{ {
#ifdef CONFIG_DEBUG_RODATA
int err; int err;
char opc[BREAK_INSTR_SIZE]; char opc[BREAK_INSTR_SIZE];
...@@ -801,8 +797,8 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt) ...@@ -801,8 +797,8 @@ int kgdb_arch_remove_breakpoint(struct kgdb_bkpt *bpt)
if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE)) if (err || memcmp(opc, bpt->saved_instr, BREAK_INSTR_SIZE))
goto knl_write; goto knl_write;
return err; return err;
knl_write: knl_write:
#endif /* CONFIG_DEBUG_RODATA */
return probe_kernel_write((char *)bpt->bpt_addr, return probe_kernel_write((char *)bpt->bpt_addr,
(char *)bpt->saved_instr, BREAK_INSTR_SIZE); (char *)bpt->saved_instr, BREAK_INSTR_SIZE);
} }
......
...@@ -142,7 +142,6 @@ static int test_NX(void) ...@@ -142,7 +142,6 @@ static int test_NX(void)
* by the error message * by the error message
*/ */
#ifdef CONFIG_DEBUG_RODATA
/* Test 3: Check if the .rodata section is executable */ /* Test 3: Check if the .rodata section is executable */
if (rodata_test_data != 0xC3) { if (rodata_test_data != 0xC3) {
printk(KERN_ERR "test_nx: .rodata marker has invalid value\n"); printk(KERN_ERR "test_nx: .rodata marker has invalid value\n");
...@@ -151,7 +150,6 @@ static int test_NX(void) ...@@ -151,7 +150,6 @@ static int test_NX(void)
printk(KERN_ERR "test_nx: .rodata section is executable\n"); printk(KERN_ERR "test_nx: .rodata section is executable\n");
ret = -ENODEV; ret = -ENODEV;
} }
#endif
#if 0 #if 0
/* Test 4: Check if the .data section of a module is executable */ /* Test 4: Check if the .data section of a module is executable */
......
...@@ -76,5 +76,5 @@ int rodata_test(void) ...@@ -76,5 +76,5 @@ int rodata_test(void)
} }
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Testcase for the DEBUG_RODATA infrastructure"); MODULE_DESCRIPTION("Testcase for marking rodata as read-only");
MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>"); MODULE_AUTHOR("Arjan van de Ven <arjan@linux.intel.com>");
...@@ -41,29 +41,28 @@ ENTRY(phys_startup_64) ...@@ -41,29 +41,28 @@ ENTRY(phys_startup_64)
jiffies_64 = jiffies; jiffies_64 = jiffies;
#endif #endif
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) #if defined(CONFIG_X86_64)
/* /*
* On 64-bit, align RODATA to 2MB so that even with CONFIG_DEBUG_RODATA * On 64-bit, align RODATA to 2MB so we retain large page mappings for
* we retain large page mappings for boundaries spanning kernel text, rodata * boundaries spanning kernel text, rodata and data sections.
* and data sections.
* *
* However, kernel identity mappings will have different RWX permissions * However, kernel identity mappings will have different RWX permissions
* to the pages mapping to text and to the pages padding (which are freed) the * to the pages mapping to text and to the pages padding (which are freed) the
* text section. Hence kernel identity mappings will be broken to smaller * text section. Hence kernel identity mappings will be broken to smaller
* pages. For 64-bit, kernel text and kernel identity mappings are different, * pages. For 64-bit, kernel text and kernel identity mappings are different,
* so we can enable protection checks that come with CONFIG_DEBUG_RODATA, * so we can enable protection checks as well as retain 2MB large page
* as well as retain 2MB large page mappings for kernel text. * mappings for kernel text.
*/ */
#define X64_ALIGN_DEBUG_RODATA_BEGIN . = ALIGN(HPAGE_SIZE); #define X64_ALIGN_RODATA_BEGIN . = ALIGN(HPAGE_SIZE);
#define X64_ALIGN_DEBUG_RODATA_END \ #define X64_ALIGN_RODATA_END \
. = ALIGN(HPAGE_SIZE); \ . = ALIGN(HPAGE_SIZE); \
__end_rodata_hpage_align = .; __end_rodata_hpage_align = .;
#else #else
#define X64_ALIGN_DEBUG_RODATA_BEGIN #define X64_ALIGN_RODATA_BEGIN
#define X64_ALIGN_DEBUG_RODATA_END #define X64_ALIGN_RODATA_END
#endif #endif
...@@ -112,13 +111,11 @@ SECTIONS ...@@ -112,13 +111,11 @@ SECTIONS
EXCEPTION_TABLE(16) :text = 0x9090 EXCEPTION_TABLE(16) :text = 0x9090
#if defined(CONFIG_DEBUG_RODATA)
/* .text should occupy whole number of pages */ /* .text should occupy whole number of pages */
. = ALIGN(PAGE_SIZE); . = ALIGN(PAGE_SIZE);
#endif X64_ALIGN_RODATA_BEGIN
X64_ALIGN_DEBUG_RODATA_BEGIN
RO_DATA(PAGE_SIZE) RO_DATA(PAGE_SIZE)
X64_ALIGN_DEBUG_RODATA_END X64_ALIGN_RODATA_END
/* Data */ /* Data */
.data : AT(ADDR(.data) - LOAD_OFFSET) { .data : AT(ADDR(.data) - LOAD_OFFSET) {
......
...@@ -871,7 +871,6 @@ static noinline int do_test_wp_bit(void) ...@@ -871,7 +871,6 @@ static noinline int do_test_wp_bit(void)
return flag; return flag;
} }
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3; const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data); EXPORT_SYMBOL_GPL(rodata_test_data);
...@@ -960,5 +959,3 @@ void mark_rodata_ro(void) ...@@ -960,5 +959,3 @@ void mark_rodata_ro(void)
if (__supported_pte_mask & _PAGE_NX) if (__supported_pte_mask & _PAGE_NX)
debug_checkwx(); debug_checkwx();
} }
#endif
...@@ -1074,7 +1074,6 @@ void __init mem_init(void) ...@@ -1074,7 +1074,6 @@ void __init mem_init(void)
mem_init_print_info(NULL); mem_init_print_info(NULL);
} }
#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3; const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data); EXPORT_SYMBOL_GPL(rodata_test_data);
...@@ -1166,8 +1165,6 @@ void mark_rodata_ro(void) ...@@ -1166,8 +1165,6 @@ void mark_rodata_ro(void)
debug_checkwx(); debug_checkwx();
} }
#endif
int kern_addr_valid(unsigned long addr) int kern_addr_valid(unsigned long addr)
{ {
unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT; unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
......
...@@ -283,7 +283,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address, ...@@ -283,7 +283,7 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long address,
__pa_symbol(__end_rodata) >> PAGE_SHIFT)) __pa_symbol(__end_rodata) >> PAGE_SHIFT))
pgprot_val(forbidden) |= _PAGE_RW; pgprot_val(forbidden) |= _PAGE_RW;
#if defined(CONFIG_X86_64) && defined(CONFIG_DEBUG_RODATA) #if defined(CONFIG_X86_64)
/* /*
* Once the kernel maps the text as RO (kernel_set_to_readonly is set), * Once the kernel maps the text as RO (kernel_set_to_readonly is set),
* kernel text mappings for the large page aligned text, rodata sections * kernel text mappings for the large page aligned text, rodata sections
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment