Commit 3d345e3f authored by Eric W. Biederman's avatar Eric W. Biederman Committed by Linus Torvalds

[PATCH] kexec: x86: add CONFIG_PHYSICAL_START

For one kernel to report a crash another kernel has created we need
to have 2 kernels loaded simultaneously in memory.  To accomplish this
the two kernels need to be built to run at different physical addresses.

This patch adds the CONFIG_PHYSICAL_START option to the x86 kernel
so we can do just that.  You need to know what you are doing and
what the ramifications are before changing this value, and most users
won't care, so I have made it depend on CONFIG_EMBEDDED.

bzImage kernels will work and run at a different address when compiled
with this option but they will still load at 1MB.  If you need a kernel
loaded at a different address as well you need to boot a vmlinux.
Signed-off-by: Eric Biederman <ebiederm@xmission.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 5ded01e8
...@@ -942,6 +942,17 @@ config SECCOMP ...@@ -942,6 +942,17 @@ config SECCOMP
source kernel/Kconfig.hz source kernel/Kconfig.hz
config PHYSICAL_START
hex "Physical address where the kernel is loaded" if EMBEDDED
default "0x100000"
help
This gives the physical address where the kernel is loaded.
Primarily used in the case of kexec on panic where the
fail safe kernel needs to run at a different address than
the panic-ed kernel.
Don't change this unless you know what you are doing.
endmenu endmenu
......
...@@ -25,6 +25,7 @@ ...@@ -25,6 +25,7 @@
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/segment.h> #include <asm/segment.h>
#include <asm/page.h>
.globl startup_32 .globl startup_32
...@@ -74,7 +75,7 @@ startup_32: ...@@ -74,7 +75,7 @@ startup_32:
popl %esi # discard address popl %esi # discard address
popl %esi # real mode pointer popl %esi # real mode pointer
xorl %ebx,%ebx xorl %ebx,%ebx
ljmp $(__BOOT_CS), $0x100000 ljmp $(__BOOT_CS), $__PHYSICAL_START
/* /*
* We come here, if we were loaded high. * We come here, if we were loaded high.
...@@ -99,7 +100,7 @@ startup_32: ...@@ -99,7 +100,7 @@ startup_32:
popl %ecx # lcount popl %ecx # lcount
popl %edx # high_buffer_start popl %edx # high_buffer_start
popl %eax # hcount popl %eax # hcount
movl $0x100000,%edi movl $__PHYSICAL_START,%edi
cli # make sure we don't get interrupted cli # make sure we don't get interrupted
ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine ljmp $(__BOOT_CS), $0x1000 # and jump to the move routine
...@@ -124,5 +125,5 @@ move_routine_start: ...@@ -124,5 +125,5 @@ move_routine_start:
movsl movsl
movl %ebx,%esi # Restore setup pointer movl %ebx,%esi # Restore setup pointer
xorl %ebx,%ebx xorl %ebx,%ebx
ljmp $(__BOOT_CS), $0x100000 ljmp $(__BOOT_CS), $__PHYSICAL_START
move_routine_end: move_routine_end:
...@@ -13,6 +13,7 @@ ...@@ -13,6 +13,7 @@
#include <linux/vmalloc.h> #include <linux/vmalloc.h>
#include <linux/tty.h> #include <linux/tty.h>
#include <asm/io.h> #include <asm/io.h>
#include <asm/page.h>
/* /*
* gzip declarations * gzip declarations
...@@ -308,7 +309,7 @@ static void setup_normal_output_buffer(void) ...@@ -308,7 +309,7 @@ static void setup_normal_output_buffer(void)
#else #else
if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory"); if ((RM_ALT_MEM_K > RM_EXT_MEM_K ? RM_ALT_MEM_K : RM_EXT_MEM_K) < 1024) error("Less than 2MB of memory");
#endif #endif
output_data = (char *)0x100000; /* Points to 1M */ output_data = (char *)__PHYSICAL_START; /* Normally Points to 1M */
free_mem_end_ptr = (long)real_mode; free_mem_end_ptr = (long)real_mode;
} }
...@@ -333,8 +334,8 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv) ...@@ -333,8 +334,8 @@ static void setup_output_buffer_if_we_run_high(struct moveparams *mv)
low_buffer_size = low_buffer_end - LOW_BUFFER_START; low_buffer_size = low_buffer_end - LOW_BUFFER_START;
high_loaded = 1; high_loaded = 1;
free_mem_end_ptr = (long)high_buffer_start; free_mem_end_ptr = (long)high_buffer_start;
if ( (0x100000 + low_buffer_size) > ((ulg)high_buffer_start)) { if ( (__PHYSICAL_START + low_buffer_size) > ((ulg)high_buffer_start)) {
high_buffer_start = (uch *)(0x100000 + low_buffer_size); high_buffer_start = (uch *)(__PHYSICAL_START + low_buffer_size);
mv->hcount = 0; /* say: we need not to move high_buffer */ mv->hcount = 0; /* say: we need not to move high_buffer */
} }
else mv->hcount = -1; else mv->hcount = -1;
......
...@@ -14,7 +14,7 @@ ENTRY(phys_startup_32) ...@@ -14,7 +14,7 @@ ENTRY(phys_startup_32)
jiffies = jiffies_64; jiffies = jiffies_64;
SECTIONS SECTIONS
{ {
. = LOAD_OFFSET + 0x100000; . = __KERNEL_START;
phys_startup_32 = startup_32 - LOAD_OFFSET; phys_startup_32 = startup_32 - LOAD_OFFSET;
/* read-only */ /* read-only */
_text = .; /* Text and read-only data */ _text = .; /* Text and read-only data */
......
...@@ -126,9 +126,12 @@ extern int page_is_ram(unsigned long pagenr); ...@@ -126,9 +126,12 @@ extern int page_is_ram(unsigned long pagenr);
#ifdef __ASSEMBLY__ #ifdef __ASSEMBLY__
#define __PAGE_OFFSET (0xC0000000) #define __PAGE_OFFSET (0xC0000000)
#define __PHYSICAL_START CONFIG_PHYSICAL_START
#else #else
#define __PAGE_OFFSET (0xC0000000UL) #define __PAGE_OFFSET (0xC0000000UL)
#define __PHYSICAL_START ((unsigned long)CONFIG_PHYSICAL_START)
#endif #endif
#define __KERNEL_START (__PAGE_OFFSET + __PHYSICAL_START)
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET) #define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment