Commit 3eeb0778 authored by Heiko Carstens's avatar Heiko Carstens

s390/amode31: change type of __samode31, __eamode31, etc

For consistency reasons change the type of __samode31, __eamode31,
__stext_amode31, and __etext_amode31 to a char pointer so they
(nearly) match the type of all other sections.

This allows for code simplifications with follow-on patches.
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
parent c0f1d478
...@@ -23,7 +23,7 @@ ...@@ -23,7 +23,7 @@
*/ */
#define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var #define __bootdata_preserved(var) __section(".boot.preserved.data." #var) var
extern unsigned long __samode31, __eamode31; extern char *__samode31, *__eamode31;
extern unsigned long __stext_amode31, __etext_amode31; extern char *__stext_amode31, *__etext_amode31;
#endif #endif
...@@ -216,8 +216,8 @@ void arch_crash_save_vmcoreinfo(void) ...@@ -216,8 +216,8 @@ void arch_crash_save_vmcoreinfo(void)
VMCOREINFO_SYMBOL(lowcore_ptr); VMCOREINFO_SYMBOL(lowcore_ptr);
VMCOREINFO_SYMBOL(high_memory); VMCOREINFO_SYMBOL(high_memory);
VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS); VMCOREINFO_LENGTH(lowcore_ptr, NR_CPUS);
vmcoreinfo_append_str("SAMODE31=%lx\n", __samode31); vmcoreinfo_append_str("SAMODE31=%lx\n", (unsigned long)__samode31);
vmcoreinfo_append_str("EAMODE31=%lx\n", __eamode31); vmcoreinfo_append_str("EAMODE31=%lx\n", (unsigned long)__eamode31);
vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset()); vmcoreinfo_append_str("KERNELOFFSET=%lx\n", kaslr_offset());
abs_lc = get_abs_lowcore(); abs_lc = get_abs_lowcore();
abs_lc->vmcore_info = paddr_vmcoreinfo_note(); abs_lc->vmcore_info = paddr_vmcoreinfo_note();
......
...@@ -97,10 +97,10 @@ EXPORT_SYMBOL(console_irq); ...@@ -97,10 +97,10 @@ EXPORT_SYMBOL(console_irq);
* relocated above 2 GB, because it has to use 31 bit addresses. * relocated above 2 GB, because it has to use 31 bit addresses.
* Such code and data is part of the .amode31 section. * Such code and data is part of the .amode31 section.
*/ */
unsigned long __amode31_ref __samode31 = (unsigned long)&_samode31; char __amode31_ref *__samode31 = _samode31;
unsigned long __amode31_ref __eamode31 = (unsigned long)&_eamode31; char __amode31_ref *__eamode31 = _eamode31;
unsigned long __amode31_ref __stext_amode31 = (unsigned long)&_stext_amode31; char __amode31_ref *__stext_amode31 = _stext_amode31;
unsigned long __amode31_ref __etext_amode31 = (unsigned long)&_etext_amode31; char __amode31_ref *__etext_amode31 = _etext_amode31;
struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table; struct exception_table_entry __amode31_ref *__start_amode31_ex_table = _start_amode31_ex_table;
struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table; struct exception_table_entry __amode31_ref *__stop_amode31_ex_table = _stop_amode31_ex_table;
...@@ -770,15 +770,15 @@ static void __init setup_memory(void) ...@@ -770,15 +770,15 @@ static void __init setup_memory(void)
static void __init relocate_amode31_section(void) static void __init relocate_amode31_section(void)
{ {
unsigned long amode31_size = __eamode31 - __samode31; unsigned long amode31_size = __eamode31 - __samode31;
long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31; long amode31_offset, *ptr;
long *ptr;
amode31_offset = physmem_info.reserved[RR_AMODE31].start - (unsigned long)__samode31;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size); pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */ /* Move original AMODE31 section to the new one */
memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size); memmove((void *)physmem_info.reserved[RR_AMODE31].start, __samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */ /* Zero out the old AMODE31 section to catch invalid accesses within it */
memset((void *)__samode31, 0, amode31_size); memset(__samode31, 0, amode31_size);
/* Update all AMODE31 region references */ /* Update all AMODE31 region references */
for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++) for (ptr = _start_amode31_refs; ptr != _end_amode31_refs; ptr++)
......
...@@ -290,8 +290,8 @@ static int pt_dump_init(void) ...@@ -290,8 +290,8 @@ static int pt_dump_init(void)
max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2;
max_addr = 1UL << (max_addr * 11 + 31); max_addr = 1UL << (max_addr * 11 + 31);
address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size; address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size;
address_markers[AMODE31_START_NR].start_address = __samode31; address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31;
address_markers[AMODE31_END_NR].start_address = __eamode31; address_markers[AMODE31_END_NR].start_address = (unsigned long)__eamode31;
address_markers[MODULES_NR].start_address = MODULES_VADDR; address_markers[MODULES_NR].start_address = MODULES_VADDR;
address_markers[MODULES_END_NR].start_address = MODULES_END; address_markers[MODULES_END_NR].start_address = MODULES_END;
address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore; address_markers[ABS_LOWCORE_NR].start_address = __abs_lowcore;
......
...@@ -657,8 +657,8 @@ void __init vmem_map_init(void) ...@@ -657,8 +657,8 @@ void __init vmem_map_init(void)
(unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT); (unsigned long)(__end_rodata - _etext) >> PAGE_SHIFT);
set_memory_rox((unsigned long)_sinittext, set_memory_rox((unsigned long)_sinittext,
(unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT); (unsigned long)(_einittext - _sinittext) >> PAGE_SHIFT);
set_memory_rox(__stext_amode31, set_memory_rox((unsigned long)__stext_amode31,
(__etext_amode31 - __stext_amode31) >> PAGE_SHIFT); (unsigned long)(__etext_amode31 - __stext_amode31) >> PAGE_SHIFT);
/* lowcore must be executable for LPSWE */ /* lowcore must be executable for LPSWE */
if (!static_key_enabled(&cpu_has_bear)) if (!static_key_enabled(&cpu_has_bear))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment