Commit 481f75c0 authored by Dave Young, committed by Matt Fleming

x86/efi: Cleanup efi_enter_virtual_mode() function

Add two small functions: efi_merge_regions() and efi_map_regions().
efi_enter_virtual_mode() calls them instead of embedding the two long
for loops.
Signed-off-by: Dave Young <dyoung@redhat.com>
Acked-by: Borislav Petkov <bp@suse.de>
Tested-by: Toshi Kani <toshi.kani@hp.com>
Signed-off-by: Matt Fleming <matt.fleming@intel.com>
parent a7f84f03
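For quick orientation, here is a condensed sketch of efi_enter_virtual_mode() after this cleanup, reconstructed from the diff below; the tail of the function that installs the virtual address map is elided, and declarations used only by that tail are omitted:

void __init efi_enter_virtual_mode(void)
{
	void *new_memmap = NULL;
	int count = 0;

	efi.systab = NULL;

	if (!efi_is_native()) {
		efi_unmap_memmap();
		return;
	}

	/* Pass 1: coalesce adjacent descriptors of the same type/attribute. */
	efi_merge_regions();

	/* Pass 2: map runtime regions, record efi.systab, build new_memmap. */
	new_memmap = efi_map_regions(&count);
	if (!new_memmap) {
		pr_err("Error reallocating memory, EFI runtime non-functional!\n");
		return;
	}

	BUG_ON(!efi.systab);

	/* ... remainder of the function unchanged ... */
}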
@@ -773,44 +773,12 @@ void __init old_map_region(efi_memory_desc_t *md)
(unsigned long long)md->phys_addr);
}
/*
* This function will switch the EFI runtime services to virtual mode.
* Essentially, we look through the EFI memmap and map every region that
* has the runtime attribute bit set in its memory descriptor into the
* ->trampoline_pgd page table using a top-down VA allocation scheme.
*
* The old method which used to update that memory descriptor with the
* virtual address obtained from ioremap() is still supported when the
* kernel is booted with efi=old_map on its command line. Same old
* method enabled the runtime services to be called without having to
* thunk back into physical mode for every invocation.
*
* The new method does a pagetable switch in a preemption-safe manner
* so that we're in a different address space when calling a runtime
* function. For function arguments passing we do copy the PGDs of the
* kernel page table into ->trampoline_pgd prior to each call.
*/
void __init efi_enter_virtual_mode(void)
/* Merge contiguous regions of the same type and attribute */
static void __init efi_merge_regions(void)
{
void *p;
efi_memory_desc_t *md, *prev_md = NULL;
void *p, *new_memmap = NULL;
unsigned long size;
efi_status_t status;
u64 end, systab;
int count = 0;
efi.systab = NULL;
/*
* We don't do virtual mode, since we don't do runtime services, on
* non-native EFI
*/
if (!efi_is_native()) {
efi_unmap_memmap();
return;
}
/* Merge contiguous regions of the same type and attribute */
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
u64 prev_size;
md = p;
@@ -835,8 +803,31 @@ void __init efi_enter_virtual_mode(void)
continue;
}
prev_md = md;
}
}
static void __init get_systab_virt_addr(efi_memory_desc_t *md)
{
unsigned long size;
u64 end, systab;
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
systab = (u64)(unsigned long)efi_phys.systab;
if (md->phys_addr <= systab && systab < end) {
systab += md->virt_addr - md->phys_addr;
efi.systab = (efi_system_table_t *)(unsigned long)systab;
}
}
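To make the offset arithmetic in get_systab_virt_addr() concrete, here is a minimal standalone sketch with made-up example addresses (the values are hypothetical, not taken from the commit): the system table keeps its offset within its region, so adding the region's phys-to-virt delta yields its new virtual address.

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* Hypothetical example values, not from the commit. */
	uint64_t phys_addr = 0x7f000000ULL;          /* start of the region's physical range */
	uint64_t virt_addr = 0xffffffef00000000ULL;  /* VA the region was mapped at */
	uint64_t systab    = 0x7f001000ULL;          /* physical address of the system table */

	/* Same arithmetic as get_systab_virt_addr(): add the region's
	 * phys->virt delta to translate the system table address. */
	uint64_t systab_virt = systab + (virt_addr - phys_addr);

	printf("systab virtual address: 0x%llx\n",
	       (unsigned long long)systab_virt);
	return 0;
}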
/*
* Map EFI memory ranges for runtime services and update new_memmap with virtual
* addresses.
*/
static void * __init efi_map_regions(int *count)
{
efi_memory_desc_t *md;
void *p, *tmp, *new_memmap = NULL;
for (p = memmap.map; p < memmap.map_end; p += memmap.desc_size) {
md = p;
@@ -849,26 +840,64 @@ void __init efi_enter_virtual_mode(void)
}
efi_map_region(md);
get_systab_virt_addr(md);
size = md->num_pages << EFI_PAGE_SHIFT;
end = md->phys_addr + size;
tmp = krealloc(new_memmap, (*count + 1) * memmap.desc_size,
GFP_KERNEL);
if (!tmp)
goto out_krealloc;
new_memmap = tmp;
memcpy(new_memmap + (*count * memmap.desc_size), md,
memmap.desc_size);
(*count)++;
}
systab = (u64) (unsigned long) efi_phys.systab;
if (md->phys_addr <= systab && systab < end) {
systab += md->virt_addr - md->phys_addr;
return new_memmap;
out_krealloc:
kfree(new_memmap);
return NULL;
}
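The krealloc()-based grow-and-append loop is the core of efi_map_regions(). Below is a minimal userspace sketch of the same pattern, using realloc() in place of krealloc() and a made-up descriptor size; both are assumptions for illustration, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define DESC_SIZE 48	/* stand-in for memmap.desc_size */

/* Grow the buffer by one descriptor and append it, mirroring the
 * krealloc()/memcpy()/count++ sequence in efi_map_regions(); on
 * allocation failure free the buffer, like the out_krealloc path. */
static void *append_desc(void *map, int *count, const void *desc)
{
	void *tmp = realloc(map, (size_t)(*count + 1) * DESC_SIZE);
	if (!tmp) {
		free(map);
		return NULL;
	}
	memcpy((char *)tmp + (size_t)*count * DESC_SIZE, desc, DESC_SIZE);
	(*count)++;
	return tmp;
}

int main(void)
{
	char desc[DESC_SIZE] = { 0 };
	void *map = NULL;
	int count = 0;

	for (int i = 0; i < 3; i++) {
		desc[0] = (char)i;	/* fake descriptor contents */
		map = append_desc(map, &count, desc);
		if (!map)
			return 1;
	}
	printf("appended %d descriptors\n", count);
	free(map);
	return 0;
}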
/*
* This function will switch the EFI runtime services to virtual mode.
* Essentially, we look through the EFI memmap and map every region that
* has the runtime attribute bit set in its memory descriptor into the
* ->trampoline_pgd page table using a top-down VA allocation scheme.
*
* The old method which used to update that memory descriptor with the
* virtual address obtained from ioremap() is still supported when the
* kernel is booted with efi=old_map on its command line. Same old
* method enabled the runtime services to be called without having to
* thunk back into physical mode for every invocation.
*
* The new method does a pagetable switch in a preemption-safe manner
* so that we're in a different address space when calling a runtime
* function. For function arguments passing we do copy the PGDs of the
* kernel page table into ->trampoline_pgd prior to each call.
*/
void __init efi_enter_virtual_mode(void)
{
efi_status_t status;
void *new_memmap = NULL;
int count = 0;
efi.systab = (efi_system_table_t *) (unsigned long) systab;
efi.systab = NULL;
/*
* We don't do virtual mode, since we don't do runtime services, on
* non-native EFI
*/
if (!efi_is_native()) {
efi_unmap_memmap();
return;
}
new_memmap = krealloc(new_memmap,
(count + 1) * memmap.desc_size,
GFP_KERNEL);
if (!new_memmap)
goto err_out;
efi_merge_regions();
memcpy(new_memmap + (count * memmap.desc_size), md,
memmap.desc_size);
count++;
new_memmap = efi_map_regions(&count);
if (!new_memmap) {
pr_err("Error reallocating memory, EFI runtime non-functional!\n");
return;
}
BUG_ON(!efi.systab);
@@ -922,9 +951,6 @@ void __init efi_enter_virtual_mode(void)
0, NULL);
return;
err_out:
pr_err("Error reallocating memory, EFI runtime non-functional!\n");
}
/*