Commit 5f64ec64 authored by H. Peter Anvin

x86, boot: stylistic cleanups for boot/compressed/head_32.S

Reformat arch/x86/boot/compressed/head_32.S to be closer to currently
preferred kernel assembly style, that is:

- opcode and operand separated by tab
- operands separated by ", "
- C-style comments

This also makes it more similar to head_64.S.
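
As an illustrative before/after pair taken from the patch below, the old

	movl %eax,%ds

becomes

	movl	%eax, %ds

and trailing "#" comments such as "# heap area" become "/* heap area */".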

[ Impact: cleanup, no object code change ]
Signed-off-by: H. Peter Anvin <hpa@zytor.com>
parent bd2a3698
@@ -21,7 +21,7 @@
 /*
  * High loaded stuff by Hans Lermen & Werner Almesberger, Feb. 1996
  */
-.text
+	.text
 #include <linux/linkage.h>
 #include <asm/segment.h>
@@ -29,24 +29,27 @@
 #include <asm/boot.h>
 #include <asm/asm-offsets.h>
-.section ".text.head","ax",@progbits
+	.section ".text.head","ax",@progbits
 ENTRY(startup_32)
 	cld
-	/* test KEEP_SEGMENTS flag to see if the bootloader is asking
-	 * us to not reload segments */
+	/*
+	 * Test KEEP_SEGMENTS flag to see if the bootloader is asking
+	 * us to not reload segments
+	 */
 	testb $(1<<6), BP_loadflags(%esi)
 	jnz 1f
 	cli
-	movl $(__BOOT_DS),%eax
-	movl %eax,%ds
-	movl %eax,%es
-	movl %eax,%fs
-	movl %eax,%gs
-	movl %eax,%ss
+	movl	$__BOOT_DS, %eax
+	movl	%eax, %ds
+	movl	%eax, %es
+	movl	%eax, %fs
+	movl	%eax, %gs
+	movl	%eax, %ss
 1:
-/* Calculate the delta between where we were compiled to run
+/*
+ * Calculate the delta between where we were compiled to run
  * at and where we were actually loaded at. This can only be done
  * with a short local call on x86. Nothing else will tell us what
  * address we are running at. The reserved chunk of the real-mode
@@ -58,7 +61,8 @@ ENTRY(startup_32)
 1:	popl %ebp
 	subl $1b, %ebp
-/* %ebp contains the address we are loaded at by the boot loader and %ebx
+/*
+ * %ebp contains the address we are loaded at by the boot loader and %ebx
  * contains the address where we should move the kernel image temporarily
  * for safe in-place decompression.
  */
@@ -84,7 +88,8 @@ ENTRY(startup_32)
 	addl $4095, %ebx
 	andl $~4095, %ebx
-/* Copy the compressed kernel to the end of our buffer
+/*
+ * Copy the compressed kernel to the end of our buffer
  * where decompression in place becomes safe.
  */
 	pushl %esi
@@ -92,12 +97,12 @@ ENTRY(startup_32)
 	leal _ebss(%ebx), %edi
 	movl $(_ebss - startup_32), %ecx
 	std
-	rep
-	movsb
+	rep	movsb
 	cld
 	popl %esi
-/* Compute the kernel start address.
+/*
+ * Compute the kernel start address.
  */
 #ifdef CONFIG_RELOCATABLE
 	addl $(CONFIG_PHYSICAL_ALIGN - 1), %ebp
@@ -113,19 +118,18 @@ ENTRY(startup_32)
 	jmp *%eax
 ENDPROC(startup_32)
-.section ".text"
+	.text
 relocated:
 /*
  * Clear BSS
  */
-	xorl %eax,%eax
-	leal _edata(%ebx),%edi
+	xorl	%eax, %eax
+	leal	_edata(%ebx), %edi
 	leal _ebss(%ebx), %ecx
-	subl %edi,%ecx
+	subl	%edi, %ecx
 	cld
-	rep
-	stosb
+	rep	stosb
 /*
  * Setup the stack for the decompressor
@@ -137,26 +141,28 @@ relocated:
  */
 	movl output_len(%ebx), %eax
 	pushl %eax
-	# push arguments for decompress_kernel:
-	pushl %ebp	# output address
+	/* push arguments for decompress_kernel: */
+	pushl	%ebp		/* output address */
 	movl input_len(%ebx), %eax
-	pushl %eax	# input_len
+	pushl	%eax		/* input_len */
 	leal input_data(%ebx), %eax
-	pushl %eax	# input_data
+	pushl	%eax		/* input_data */
 	leal boot_heap(%ebx), %eax
-	pushl %eax	# heap area
-	pushl %esi	# real mode pointer
+	pushl	%eax		/* heap area */
+	pushl	%esi		/* real mode pointer */
 	call decompress_kernel
 	addl $20, %esp
 	popl %ecx
 #if CONFIG_RELOCATABLE
-/* Find the address of the relocations.
+/*
+ * Find the address of the relocations.
  */
 	movl %ebp, %edi
 	addl %ecx, %edi
-/* Calculate the delta between where vmlinux was compiled to run
+/*
+ * Calculate the delta between where vmlinux was compiled to run
  * and where it was actually loaded.
  */
 	movl %ebp, %ebx
@@ -167,7 +173,7 @@ relocated:
  */
 1:	subl $4, %edi
-	movl 0(%edi), %ecx
+	movl	(%edi), %ecx
 	testl %ecx, %ecx
 	jz 2f
 	addl %ebx, -__PAGE_OFFSET(%ebx, %ecx)
@@ -178,12 +184,14 @@ relocated:
 /*
  * Jump to the decompressed kernel.
  */
-	xorl %ebx,%ebx
+	xorl	%ebx, %ebx
 	jmp *%ebp
-.bss
-/* Stack and heap for uncompression */
-.balign 4
+/*
+ * Stack and heap for uncompression
+ */
+	.bss
+	.balign 4
 boot_heap:
 	.fill BOOT_HEAP_SIZE, 1, 0
 boot_stack:
......
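
For reference, the "short local call" mentioned in the comments is the usual 32-bit trick for finding the runtime load address; a minimal sketch of the idea (illustrative only, not part of this patch):

	call	1f		/* pushes the return address, i.e. the runtime address of 1: */
1:	popl	%ebp		/* %ebp = runtime address of label 1 */
	subl	$1b, %ebp	/* subtract its link-time address to get the load delta */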