Commit a89d7ff9 authored by Pavel Tatashin, committed by Will Deacon

arm64: hibernate: remove gotos as they are not needed

Usually, gotos are used to handle cleanup after an error, but in the case of
create_safe_exec_page() and swsusp_arch_resume() there is no cleanup to
perform, so simply return the errors directly.
Signed-off-by: Pavel Tatashin <pasha.tatashin@soleen.com>
Reviewed-by: James Morse <james.morse@arm.com>
Signed-off-by: Will Deacon <will@kernel.org>
parent 051a7a94
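For context, here is a minimal, hypothetical sketch of the pattern this commit applies (the alloc_buffer_* helpers are made-up names, not code from the kernel tree): when a function has nothing to undo on failure, the cleanup label degenerates into a bare return, so the error can simply be returned where it is detected.

/*
 * Hypothetical example, not from the kernel tree: the same allocation
 * written with a cleanup label and with direct returns. With no
 * resources to release on failure, the label only returns, so the
 * second form is equivalent and shorter.
 */
#include <errno.h>
#include <stdlib.h>

/* Before: error paths jump to a label that does nothing but return. */
static int alloc_buffer_goto(void **out)
{
	int rc = 0;
	void *buf = malloc(64);

	if (!buf) {
		rc = -ENOMEM;
		goto out;
	}
	*out = buf;
out:
	return rc;
}

/* After: return the error where it is detected. */
static int alloc_buffer_direct(void **out)
{
	void *buf = malloc(64);

	if (!buf)
		return -ENOMEM;
	*out = buf;
	return 0;
}

The goto form remains useful when several resources must be unwound in reverse order; in the functions touched below, each early exit has nothing to release, which is why the direct returns are equivalent.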
arch/arm64/kernel/hibernate.c

@@ -198,7 +198,6 @@ static int create_safe_exec_page(void *src_start, size_t length,
 				 unsigned long dst_addr,
 				 phys_addr_t *phys_dst_addr)
 {
-	int rc = 0;
 	pgd_t *trans_pgd;
 	pgd_t *pgdp;
 	pud_t *pudp;
@@ -206,47 +205,37 @@ static int create_safe_exec_page(void *src_start, size_t length,
 	pte_t *ptep;
 	unsigned long dst = get_safe_page(GFP_ATOMIC);
 
-	if (!dst) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!dst)
+		return -ENOMEM;
 
 	memcpy((void *)dst, src_start, length);
 	__flush_icache_range(dst, dst + length);
 
 	trans_pgd = (void *)get_safe_page(GFP_ATOMIC);
-	if (!trans_pgd) {
-		rc = -ENOMEM;
-		goto out;
-	}
+	if (!trans_pgd)
+		return -ENOMEM;
 
 	pgdp = pgd_offset_raw(trans_pgd, dst_addr);
 	if (pgd_none(READ_ONCE(*pgdp))) {
 		pudp = (void *)get_safe_page(GFP_ATOMIC);
-		if (!pudp) {
-			rc = -ENOMEM;
-			goto out;
-		}
+		if (!pudp)
+			return -ENOMEM;
 		pgd_populate(&init_mm, pgdp, pudp);
 	}
 
 	pudp = pud_offset(pgdp, dst_addr);
 	if (pud_none(READ_ONCE(*pudp))) {
 		pmdp = (void *)get_safe_page(GFP_ATOMIC);
-		if (!pmdp) {
-			rc = -ENOMEM;
-			goto out;
-		}
+		if (!pmdp)
+			return -ENOMEM;
 		pud_populate(&init_mm, pudp, pmdp);
 	}
 
 	pmdp = pmd_offset(pudp, dst_addr);
 	if (pmd_none(READ_ONCE(*pmdp))) {
 		ptep = (void *)get_safe_page(GFP_ATOMIC);
-		if (!ptep) {
-			rc = -ENOMEM;
-			goto out;
-		}
+		if (!ptep)
+			return -ENOMEM;
 		pmd_populate_kernel(&init_mm, pmdp, ptep);
 	}
 
@@ -272,8 +261,7 @@ static int create_safe_exec_page(void *src_start, size_t length,
 
 	*phys_dst_addr = virt_to_phys((void *)dst);
 
-out:
-	return rc;
+	return 0;
 }
 
 #define dcache_clean_range(start, end)	__flush_dcache_area(start, (end - start))
@@ -482,7 +470,7 @@ static int copy_page_tables(pgd_t *dst_pgdp, unsigned long start,
  */
 int swsusp_arch_resume(void)
 {
-	int rc = 0;
+	int rc;
 	void *zero_page;
 	size_t exit_size;
 	pgd_t *tmp_pg_dir;
@@ -498,12 +486,11 @@ int swsusp_arch_resume(void)
 	tmp_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC);
 	if (!tmp_pg_dir) {
 		pr_err("Failed to allocate memory for temporary page tables.\n");
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 	rc = copy_page_tables(tmp_pg_dir, PAGE_OFFSET, PAGE_END);
 	if (rc)
-		goto out;
+		return rc;
 
 	/*
 	 * We need a zero page that is zero before & after resume in order to
@@ -512,8 +499,7 @@ int swsusp_arch_resume(void)
 	zero_page = (void *)get_safe_page(GFP_ATOMIC);
 	if (!zero_page) {
 		pr_err("Failed to allocate zero page.\n");
-		rc = -ENOMEM;
-		goto out;
+		return -ENOMEM;
 	}
 
 	/*
@@ -531,7 +517,7 @@ int swsusp_arch_resume(void)
 				   &phys_hibernate_exit);
 	if (rc) {
 		pr_err("Failed to create safe executable page for hibernate_exit code.\n");
-		goto out;
+		return rc;
 	}
 
 	/*
@@ -558,8 +544,7 @@ int swsusp_arch_resume(void)
 			 resume_hdr.reenter_kernel, restore_pblist,
 			 resume_hdr.__hyp_stub_vectors, virt_to_phys(zero_page));
 
-out:
-	return rc;
+	return 0;
 }
 
 int hibernate_resume_nonboot_cpu_disable(void)