Commit be1b3d8c authored by Sam Ravnborg, committed by Kyle McMartin

[PARISC] Beautify parisc vmlinux.lds.S

Introduce a consistent layout of vmlinux.
The same layout has been introduced for most
architectures.

At the same time, move a few label definitions inside
the curly brackets so they are assigned the correct
starting address. Before, ld-inserted alignment could
have caused the label to point before the actual
start of the section.
Signed-off-by: Sam Ravnborg <sam@ravnborg.org>
Signed-off-by: Kyle McMartin <kyle@mcmartin.ca>
parent e9a03990
@@ -46,7 +46,6 @@ jiffies = jiffies_64;
#endif
SECTIONS
{
. = KERNEL_BINARY_TEXT_START;
_text = .; /* Text and read-only data */
@@ -63,58 +62,76 @@ SECTIONS
*(.lock.text) /* out-of-line lock text */
*(.gnu.warning)
} = 0
_etext = .; /* End of text section */
/* End of text section */
_etext = .;
RODATA
BUG_TABLE
/* writeable */
. = ALIGN(ASM_PAGE_SIZE); /* Make sure this is page aligned so
that we can properly leave these
as writable */
/* Make sure this is page aligned so
* that we can properly leave these
* as writable
*/
. = ALIGN(ASM_PAGE_SIZE);
data_start = .;
. = ALIGN(16); /* Exception table */
. = ALIGN(16);
/* Exception table */
__ex_table : {
__start___ex_table = .;
__ex_table : { *(__ex_table) }
*(__ex_table)
__stop___ex_table = .;
}
NOTES
__start___unwind = .; /* unwind info */
.PARISC.unwind : { *(.PARISC.unwind) }
/* unwind info */
.PARISC.unwind : {
__start___unwind = .;
*(.PARISC.unwind)
__stop___unwind = .;
}
/* rarely changed data like cpu maps */
. = ALIGN(16);
.data.read_mostly : { *(.data.read_mostly) }
.data.read_mostly : {
*(.data.read_mostly)
}
. = ALIGN(L1_CACHE_BYTES);
.data : { /* Data */
/* Data */
.data : {
DATA_DATA
CONSTRUCTORS
}
. = ALIGN(L1_CACHE_BYTES);
.data.cacheline_aligned : { *(.data.cacheline_aligned) }
.data.cacheline_aligned : {
*(.data.cacheline_aligned)
}
/* PA-RISC locks requires 16-byte alignment */
. = ALIGN(16);
.data.lock_aligned : { *(.data.lock_aligned) }
.data.lock_aligned : {
*(.data.lock_aligned)
}
. = ALIGN(ASM_PAGE_SIZE);
/* nosave data is really only used for software suspend...it's here
* just in case we ever implement it */
* just in case we ever implement it
*/
. = ALIGN(ASM_PAGE_SIZE);
__nosave_begin = .;
.data_nosave : { *(.data.nosave) }
.data_nosave : {
*(.data.nosave)
}
. = ALIGN(ASM_PAGE_SIZE);
__nosave_end = .;
_edata = .; /* End of data section */
/* End of data section */
_edata = .;
__bss_start = .; /* BSS */
/* BSS */
__bss_start = .;
/* page table entries need to be PAGE_SIZE aligned */
. = ALIGN(ASM_PAGE_SIZE);
.data.vmpages : {
@@ -122,24 +139,40 @@ SECTIONS
*(.data.vm0.pgd)
*(.data.vm0.pte)
}
.bss : { *(.bss) *(COMMON) }
.bss : {
*(.bss)
*(COMMON)
}
__bss_stop = .;
/* assembler code expects init_task to be 16k aligned */
. = ALIGN(16384); /* init_task */
.data.init_task : { *(.data.init_task) }
. = ALIGN(16384);
/* init_task */
.data.init_task : {
*(.data.init_task)
}
/* The interrupt stack is currently partially coded, but not yet
* implemented */
* implemented
*/
. = ALIGN(16384);
init_istack : { *(init_istack) }
init_istack : {
*(init_istack)
}
#ifdef CONFIG_64BIT
. = ALIGN(16); /* Linkage tables */
.opd : { *(.opd) } PROVIDE (__gp = .);
.plt : { *(.plt) }
.dlt : { *(.dlt) }
. = ALIGN(16);
/* Linkage tables */
.opd : {
*(.opd)
} PROVIDE (__gp = .);
.plt : {
*(.plt)
}
.dlt : {
*(.dlt)
}
#endif
/* reserve space for interrupt stack by aligning __init* to 16k */
@@ -150,46 +183,64 @@ SECTIONS
*(.init.text)
_einittext = .;
}
.init.data : { *(.init.data) }
.init.data : {
*(.init.data)
}
. = ALIGN(16);
.init.setup : {
__setup_start = .;
.init.setup : { *(.init.setup) }
*(.init.setup)
__setup_end = .;
__initcall_start = .;
}
.initcall.init : {
__initcall_start = .;
INITCALLS
}
__initcall_end = .;
}
.con_initcall.init : {
__con_initcall_start = .;
.con_initcall.init : { *(.con_initcall.init) }
*(.con_initcall.init)
__con_initcall_end = .;
}
SECURITY_INIT
/* alternate instruction replacement. This is a mechanism x86 uses
* to detect the CPU type and replace generic instruction sequences
* with CPU specific ones. We don't currently do this in PA, but
* it seems like a good idea... */
* it seems like a good idea...
*/
. = ALIGN(4);
.altinstructions : {
__alt_instructions = .;
.altinstructions : { *(.altinstructions) }
*(.altinstructions)
__alt_instructions_end = .;
.altinstr_replacement : { *(.altinstr_replacement) }
}
.altinstr_replacement : {
*(.altinstr_replacement)
}
/* .exit.text is discard at runtime, not link time, to deal with references
from .altinstructions and .eh_frame */
.exit.text : { *(.exit.text) }
.exit.data : { *(.exit.data) }
* from .altinstructions and .eh_frame
*/
.exit.text : {
*(.exit.text)
}
.exit.data : {
*(.exit.data)
}
#ifdef CONFIG_BLK_DEV_INITRD
. = ALIGN(ASM_PAGE_SIZE);
.init.ramfs : {
__initramfs_start = .;
.init.ramfs : { *(.init.ramfs) }
*(.init.ramfs)
__initramfs_end = .;
}
#endif
PERCPU(ASM_PAGE_SIZE)
. = ALIGN(ASM_PAGE_SIZE);
__init_end = .;
/* freed after init ends here */
_end = . ;
/* Sections to be discarded */
@@ -197,7 +248,8 @@ SECTIONS
*(.exitcall.exit)
#ifdef CONFIG_64BIT
/* temporary hack until binutils is fixed to not emit these
for static binaries */
* for static binaries
*/
*(.interp)
*(.dynsym)
*(.dynstr)
@@ -209,5 +261,4 @@ SECTIONS
STABS_DEBUG
.note 0 : { *(.note) }
}