Commit 9d8e0d52 authored by Linus Torvalds

Merge tag 'x86-boot-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 boot updates from Ingo Molnar:

 - Move the kernel cmdline setup earlier in the boot process (again),
   to address a split_lock_detect= boot parameter bug

 - Ignore relocations in .notes sections

 - Simplify boot stack setup

 - Re-introduce a bootloader quirk wrt CR4 handling

 - Miscellaneous cleanups & fixes

* tag 'x86-boot-2024-05-13' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/boot/64: Clear most of CR4 in startup_64(), except PAE, MCE and LA57
  x86/boot: Move kernel cmdline setup earlier in the boot process (again)
  x86/build: Clean up arch/x86/tools/relocs.c a bit
  x86/boot: Ignore relocations in .notes sections in walk_relocs() too
  x86: Rename __{start,end}_init_task to __{start,end}_init_stack
  x86/boot: Simplify boot stack setup
parents d791a4da a0025f58
@@ -398,6 +398,11 @@ SYM_CODE_START(startup_64)
 	call	sev_enable
 #endif
 
+	/* Preserve only the CR4 bits that must be preserved, and clear the rest */
+	movq	%cr4, %rax
+	andl	$(X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57), %eax
+	movq	%rax, %cr4
+
 	/*
 	 * configure_5level_paging() updates the number of paging levels using
 	 * a trampoline in 32-bit addressable memory if the current number does
...
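For reference, a minimal user-space C sketch of the same masking logic. The bit positions match the architectural CR4 layout (PAE is bit 5, MCE bit 6, LA57 bit 12); the constants below are stand-ins mirroring the kernel's X86_CR4_* defines, not the kernel headers themselves, and the example values in main() are hypothetical:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's X86_CR4_* defines (architectural bit positions) */
    #define X86_CR4_PAE  (1UL << 5)   /* must stay set: page tables are in PAE format */
    #define X86_CR4_MCE  (1UL << 6)   /* keep machine-check exceptions enabled */
    #define X86_CR4_LA57 (1UL << 12)  /* must match the current number of paging levels */

    /* Same operation as the movq/andl/movq sequence in startup_64() */
    static uint64_t sanitize_cr4(uint64_t cr4)
    {
        return cr4 & (X86_CR4_PAE | X86_CR4_MCE | X86_CR4_LA57);
    }

    int main(void)
    {
        /* e.g. a bootloader left PGE (bit 7) and PCIDE (bit 17) set */
        uint64_t cr4 = X86_CR4_PAE | (1UL << 7) | (1UL << 17);

        printf("%#llx -> %#llx\n",
               (unsigned long long)cr4,
               (unsigned long long)sanitize_cr4(cr4));
        return 0;
    }

Everything not in the whitelist is dropped, so stale bootloader state (PCIDs, global pages, etc.) cannot leak into the early kernel environment.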
@@ -635,12 +635,10 @@ static __always_inline void prefetchw(const void *x)
 
 #define KSTK_ESP(task)		(task_pt_regs(task)->sp)
 #else
-extern unsigned long __end_init_task[];
+extern unsigned long __top_init_kernel_stack[];
 
 #define INIT_THREAD {							\
-	.sp = (unsigned long)&__end_init_task -				\
-	      TOP_OF_KERNEL_STACK_PADDING -				\
-	      sizeof(struct pt_regs),					\
+	.sp = (unsigned long)&__top_init_kernel_stack,			\
 }
 
 extern unsigned long KSTK_ESP(struct task_struct *task);
...
@@ -44,9 +44,6 @@
 #define X86_CAPABILITY	new_cpu_data+CPUINFO_x86_capability
 #define X86_VENDOR_ID	new_cpu_data+CPUINFO_x86_vendor_id
 
-
-#define SIZEOF_PTREGS	17*4
-
 /*
  * Worst-case size of the kernel mapping we need to make:
  * a relocatable kernel can live anywhere in lowmem, so we need to be able
@@ -488,13 +485,7 @@ SYM_DATA_END(initial_page_table)
 
 	.data
 .balign 4
-/*
- * The SIZEOF_PTREGS gap is a convention which helps the in-kernel unwinder
- * reliably detect the end of the stack.
- */
-SYM_DATA(initial_stack,
-	 .long init_thread_union + THREAD_SIZE -
-	 SIZEOF_PTREGS - TOP_OF_KERNEL_STACK_PADDING)
+SYM_DATA(initial_stack, .long __top_init_kernel_stack)
 
 __INITRODATA
 int_msg:
...
@@ -66,7 +66,7 @@ SYM_CODE_START_NOALIGN(startup_64)
 	mov	%rsi, %r15
 
 	/* Set up the stack for verify_cpu() */
-	leaq	(__end_init_task - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE)(%rip), %rsp
+	leaq	__top_init_kernel_stack(%rip), %rsp
 
 	/* Setup GSBASE to allow stack canary access for C code */
 	movl	$MSR_GS_BASE, %ecx
...
@@ -753,6 +753,22 @@ void __init setup_arch(char **cmdline_p)
 	boot_cpu_data.x86_phys_bits = MAX_PHYSMEM_BITS;
 #endif
 
+#ifdef CONFIG_CMDLINE_BOOL
+#ifdef CONFIG_CMDLINE_OVERRIDE
+	strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+#else
+	if (builtin_cmdline[0]) {
+		/* append boot loader cmdline to builtin */
+		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
+		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+	}
+#endif
+#endif
+
+	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+	*cmdline_p = command_line;
+
 	/*
 	 * If we have OLPC OFW, we might end up relocating the fixmap due to
 	 * reserve_top(), so do this before touching the ioremap area.
@@ -832,22 +848,6 @@ void __init setup_arch(char **cmdline_p)
 	bss_resource.start = __pa_symbol(__bss_start);
 	bss_resource.end = __pa_symbol(__bss_stop)-1;
 
-#ifdef CONFIG_CMDLINE_BOOL
-#ifdef CONFIG_CMDLINE_OVERRIDE
-	strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-#else
-	if (builtin_cmdline[0]) {
-		/* append boot loader cmdline to builtin */
-		strlcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
-		strlcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
-		strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
-	}
-#endif
-#endif
-
-	strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
-	*cmdline_p = command_line;
-
 	/*
 	 * x86_configure_nx() is called before parse_early_param() to detect
 	 * whether hardware doesn't support NX (so that the early EHCI debug
...
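This block is moved verbatim, just earlier in setup_arch(), so that a builtin split_lock_detect= parameter is visible before it is consumed. For illustration, a stand-alone C sketch of the merge behavior; strlcat()/strscpy() are kernel string helpers, so they are emulated here with standard C (an assumption made purely so the snippet runs in user space):

    #include <stdio.h>
    #include <string.h>

    #define COMMAND_LINE_SIZE 2048

    /* User-space stand-in for the kernel's strlcat() */
    static void lcat(char *dst, const char *src, size_t size)
    {
        size_t len = strlen(dst);

        if (len < size - 1)
            snprintf(dst + len, size - len, "%s", src);
    }

    int main(void)
    {
        /* Hypothetical builtin (CONFIG_CMDLINE) and bootloader command lines */
        char builtin_cmdline[COMMAND_LINE_SIZE]   = "split_lock_detect=warn";
        char boot_command_line[COMMAND_LINE_SIZE] = "root=/dev/sda1 quiet";

        /* Same order as setup_arch(): builtin first, bootloader appended */
        if (builtin_cmdline[0]) {
            lcat(builtin_cmdline, " ", COMMAND_LINE_SIZE);
            lcat(builtin_cmdline, boot_command_line, COMMAND_LINE_SIZE);
            snprintf(boot_command_line, COMMAND_LINE_SIZE, "%s", builtin_cmdline);
        }
        printf("%s\n", boot_command_line);
        /* -> "split_lock_detect=warn root=/dev/sda1 quiet" */
        return 0;
    }

With CONFIG_CMDLINE_OVERRIDE the bootloader string is discarded instead of appended.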
@@ -172,6 +172,9 @@ SECTIONS
 		/* init_task */
 		INIT_TASK_DATA(THREAD_SIZE)
 
+		/* equivalent to task_pt_regs(&init_task) */
+		__top_init_kernel_stack = __end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;
+
 #ifdef CONFIG_X86_32
 		/* 32 bit has nosave before _edata */
 		NOSAVE_DATA
...
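Defining the stack top once in the linker script is what lets the head_32.S, head_64.S, xen-head.S and INIT_THREAD users above collapse to a single symbol reference. The arithmetic it encodes, in a minimal C model (THREAD_SIZE, the padding, the pt_regs size, and the base address below are illustrative values, not the configuration-dependent kernel ones):

    #include <stdint.h>
    #include <stdio.h>

    /* Illustrative values; the real ones depend on kernel configuration */
    #define THREAD_SIZE                 16384   /* size of the init stack area     */
    #define TOP_OF_KERNEL_STACK_PADDING 0       /* 0 on x86-64, nonzero on x86-32  */
    #define PTREGS_SIZE                 168     /* sizeof(struct pt_regs), x86-64  */

    int main(void)
    {
        uint64_t start_init_stack = 0xffffffff83000000ULL; /* hypothetical */
        uint64_t end_init_stack   = start_init_stack + THREAD_SIZE;

        /*
         * Linker-script equivalent: the initial stack pointer, i.e. the
         * address task_pt_regs(&init_task) would return.
         */
        uint64_t top_init_kernel_stack =
            end_init_stack - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE;

        printf("initial %%rsp: %#llx\n",
               (unsigned long long)top_init_kernel_stack);
        return 0;
    }

The gap left for a struct pt_regs is the convention the in-kernel unwinder relies on to detect the end of the stack, which the removed head_32.S comment described.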
@@ -49,7 +49,7 @@ SYM_CODE_START(startup_xen)
 	ANNOTATE_NOENDBR
 	cld
 
-	leaq	(__end_init_task - TOP_OF_KERNEL_STACK_PADDING - PTREGS_SIZE)(%rip), %rsp
+	leaq	__top_init_kernel_stack(%rip), %rsp
 
 	/* Set up %gs.
 	 *
...
@@ -399,13 +399,13 @@
 
 #define INIT_TASK_DATA(align)						\
 	. = ALIGN(align);						\
-	__start_init_task = .;						\
+	__start_init_stack = .;						\
 	init_thread_union = .;						\
 	init_stack = .;							\
 	KEEP(*(.data..init_task))					\
 	KEEP(*(.data..init_thread_info))				\
-	. = __start_init_task + THREAD_SIZE;				\
-	__end_init_task = .;
+	. = __start_init_stack + THREAD_SIZE;				\
+	__end_init_stack = .;
 
 #define JUMP_TABLE_DATA							\
 	. = ALIGN(8);							\
...