Commit 5331d2c7 authored by Zhimin Gu, committed by Rafael J. Wysocki

x86-32, hibernate: Set up temporary text mapping for 32bit system

Set up the temporary text mapping for the final jump address
so that the system can jump to the right address after all
the pages have been copied back to their original addresses -
otherwise the final mapping for the jump address is invalid.

Analogous changes were made for 64-bit in commit 65c0554b
("x86/power/64: Fix kernel text mapping corruption during image
restoration").
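
For context, pgd_index() and pte_index() in the new
set_up_temporary_text_mapping() below just slice bit fields out of the
jump address. A minimal user-space sketch (not part of this patch),
assuming the classic non-PAE i386 layout of 10-bit PGD index, 10-bit
PTE index and 12-bit page offset, with a hypothetical jump address:

/* Sketch only: assumes the non-PAE i386 10/10/12 address split. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_SHIFT   12		/* 4 KB pages */
#define PGDIR_SHIFT  22		/* each PGD entry covers 4 MB */
#define PTRS_PER_PTE 1024

static unsigned int pgd_index_sketch(uint32_t va)
{
	return va >> PGDIR_SHIFT;
}

static unsigned int pte_index_sketch(uint32_t va)
{
	return (va >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
}

int main(void)
{
	/* Hypothetical kernel-text virtual address standing in for
	 * restore_jump_address. */
	uint32_t va = 0xc11c0000u;

	printf("pgd index %u, pte index %u\n",
	       pgd_index_sketch(va), pte_index_sketch(va));
	return 0;
}
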
Signed-off-by: Zhimin Gu <kookoo.gu@intel.com>
Acked-by: Pavel Machek <pavel@ucw.cz>
Signed-off-by: Chen Yu <yu.c.chen@intel.com>
Acked-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
parent 6bae499a
@@ -157,10 +157,8 @@ int arch_hibernation_header_save(void *addr, unsigned int max_size)
 	if (max_size < sizeof(struct restore_data_record))
 		return -EOVERFLOW;
 	rdr->magic = RESTORE_MAGIC;
-#ifdef CONFIG_X86_64
 	rdr->jump_address = (unsigned long)restore_registers;
 	rdr->jump_address_phys = __pa_symbol(restore_registers);
-#endif
 
 	/*
 	 * The restore code fixes up CR3 and CR4 in the following sequence:
@@ -198,10 +196,8 @@ int arch_hibernation_header_restore(void *addr)
 		return -EINVAL;
 	}
 
-#ifdef CONFIG_X86_64
 	restore_jump_address = rdr->jump_address;
 	jump_address_phys = rdr->jump_address_phys;
-#endif
 	restore_cr3 = rdr->cr3;
 
 	if (hibernation_e820_mismatch(rdr->e820_digest)) {
...
@@ -143,6 +143,32 @@ static inline void resume_init_first_level_page_table(pgd_t *pg_dir)
 #endif
 }
 
+static int set_up_temporary_text_mapping(pgd_t *pgd_base)
+{
+	pgd_t *pgd;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pgd = pgd_base + pgd_index(restore_jump_address);
+	pmd = resume_one_md_table_init(pgd);
+	if (!pmd)
+		return -ENOMEM;
+
+	if (boot_cpu_has(X86_FEATURE_PSE)) {
+		set_pmd(pmd + pmd_index(restore_jump_address),
+			__pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC)));
+	} else {
+		pte = resume_one_page_table_init(pmd);
+		if (!pte)
+			return -ENOMEM;
+
+		set_pte(pte + pte_index(restore_jump_address),
+			__pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC)));
+	}
+
+	return 0;
+}
+
 asmlinkage int swsusp_arch_resume(void)
 {
 	int error;
@@ -152,6 +178,11 @@ asmlinkage int swsusp_arch_resume(void)
 		return -ENOMEM;
 
 	resume_init_first_level_page_table(resume_pg_dir);
+
+	error = set_up_temporary_text_mapping(resume_pg_dir);
+	if (error)
+		return error;
+
 	error = resume_physical_mapping_init(resume_pg_dir);
 	if (error)
 		return error;
...
@@ -36,6 +36,8 @@ ENTRY(swsusp_arch_suspend)
 ENDPROC(swsusp_arch_suspend)
 
 ENTRY(restore_image)
+	/* prepare to jump to the image kernel */
+	movl	restore_jump_address, %ebx
 	movl	restore_cr3, %ebp
 
 	movl	mmu_cr4_features, %ecx
@@ -74,6 +76,7 @@ copy_loop:
 	.p2align 4,,7
 done:
+	jmpl	*%ebx
 
 	/* code below belongs to the image kernel */
 	.align PAGE_SIZE
...
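
A note on the two mappings set up above: the PSE and 4 KB paths differ
only in how much of the physical address survives the mask before the
protection bits are ORed in. A user-space sketch with hard-coded
constants (the masks and flag values mirror the non-PAE i386 ones
behind PAGE_KERNEL_LARGE_EXEC and PAGE_KERNEL_EXEC, but are
assumptions here, not taken from this patch):

/* Sketch only: masks and flag bits hard-coded for non-PAE i386. */
#include <stdio.h>
#include <stdint.h>

#define PAGE_MASK_32 0xfffff000u	/* keep bits 31..12 for a 4 KB pte */
#define PMD_MASK_32  0xffc00000u	/* keep bits 31..22 for a 4 MB PSE pmd */

int main(void)
{
	uint32_t phys = 0x011c1234u;	/* hypothetical jump_address_phys */
	uint32_t prot_large = 0x1e3;	/* PRESENT|RW|ACCESSED|DIRTY|PSE|GLOBAL */
	uint32_t prot_4k = 0x163;	/* PRESENT|RW|ACCESSED|DIRTY|GLOBAL */

	/* PSE path: the low 22 bits of the entry hold only flags, so the
	 * whole 4 MB page containing the jump address gets mapped. */
	printf("pmd entry: 0x%08x\n",
	       (unsigned int)((phys & PMD_MASK_32) | prot_large));
	/* 4 KB path: the pte keeps the physical address down to the page. */
	printf("pte entry: 0x%08x\n",
	       (unsigned int)((phys & PAGE_MASK_32) | prot_4k));
	return 0;
}
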