Commit 11520e5e authored by Linus Torvalds

Revert "x86-64/efi: Use EFI to deal with platform wall clock (again)"

This reverts commit bd52276f ("x86-64/efi: Use EFI to deal with
platform wall clock (again)"), and the two supporting commits:

  da5a108d: "x86/kernel: remove tboot 1:1 page table creation code"

  185034e7: "x86, efi: 1:1 pagetable mapping for virtual EFI calls"

as they all depend semantically on commit 53b87cf0 ("x86, mm:
Include the entire kernel memory map in trampoline_pgd") that got
reverted earlier due to the problems it caused.

This was pointed out by Yinghai Lu, and verified by me on my Macbook Air
that uses EFI.
Pointed-out-by: Yinghai Lu <yinghai@kernel.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 5bd665f2
@@ -69,37 +69,23 @@ extern u64 efi_call6(void *fp, u64 arg1, u64 arg2, u64 arg3,
 	efi_call6((void *)(f), (u64)(a1), (u64)(a2), (u64)(a3),	\
 		  (u64)(a4), (u64)(a5), (u64)(a6))
 
-extern unsigned long efi_call_virt_prelog(void);
-extern void efi_call_virt_epilog(unsigned long);
-
-#define efi_callx(x, func, ...)					\
-({								\
-	efi_status_t __status;					\
-	unsigned long __pgd;					\
-								\
-	__pgd = efi_call_virt_prelog();				\
-	__status = efi_call##x(func, __VA_ARGS__);		\
-	efi_call_virt_epilog(__pgd);				\
-	__status;						\
-})
-
 #define efi_call_virt0(f)				\
-	efi_callx(0, (void *)(efi.systab->runtime->f))
+	efi_call0((void *)(efi.systab->runtime->f))
 #define efi_call_virt1(f, a1)					\
-	efi_callx(1, (void *)(efi.systab->runtime->f), (u64)(a1))
+	efi_call1((void *)(efi.systab->runtime->f), (u64)(a1))
 #define efi_call_virt2(f, a1, a2)					\
-	efi_callx(2, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
+	efi_call2((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2))
 #define efi_call_virt3(f, a1, a2, a3)					\
-	efi_callx(3, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+	efi_call3((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3))
 #define efi_call_virt4(f, a1, a2, a3, a4)				\
-	efi_callx(4, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+	efi_call4((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4))
 #define efi_call_virt5(f, a1, a2, a3, a4, a5)				\
-	efi_callx(5, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+	efi_call5((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5))
 #define efi_call_virt6(f, a1, a2, a3, a4, a5, a6)			\
-	efi_callx(6, (void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
+	efi_call6((void *)(efi.systab->runtime->f), (u64)(a1), (u64)(a2), \
 		  (u64)(a3), (u64)(a4), (u64)(a5), (u64)(a6))
 
 extern void __iomem *efi_ioremap(unsigned long addr, unsigned long size,
......
@@ -103,13 +103,71 @@ void __init tboot_probe(void)
 	pr_debug("tboot_size: 0x%x\n", tboot->tboot_size);
 }
 
+static pgd_t *tboot_pg_dir;
+static struct mm_struct tboot_mm = {
+	.mm_rb = RB_ROOT,
+	.pgd = swapper_pg_dir,
+	.mm_users = ATOMIC_INIT(2),
+	.mm_count = ATOMIC_INIT(1),
+	.mmap_sem = __RWSEM_INITIALIZER(init_mm.mmap_sem),
+	.page_table_lock = __SPIN_LOCK_UNLOCKED(init_mm.page_table_lock),
+	.mmlist = LIST_HEAD_INIT(init_mm.mmlist),
+};
+
 static inline void switch_to_tboot_pt(void)
 {
-#ifdef CONFIG_X86_32
-	load_cr3(initial_page_table);
-#else
-	write_cr3(real_mode_header->trampoline_pgd);
-#endif
+	write_cr3(virt_to_phys(tboot_pg_dir));
+}
+
+static int map_tboot_page(unsigned long vaddr, unsigned long pfn,
+			  pgprot_t prot)
+{
+	pgd_t *pgd;
+	pud_t *pud;
+	pmd_t *pmd;
+	pte_t *pte;
+
+	pgd = pgd_offset(&tboot_mm, vaddr);
+	pud = pud_alloc(&tboot_mm, pgd, vaddr);
+	if (!pud)
+		return -1;
+	pmd = pmd_alloc(&tboot_mm, pud, vaddr);
+	if (!pmd)
+		return -1;
+	pte = pte_alloc_map(&tboot_mm, NULL, pmd, vaddr);
+	if (!pte)
+		return -1;
+	set_pte_at(&tboot_mm, vaddr, pte, pfn_pte(pfn, prot));
+	pte_unmap(pte);
+	return 0;
+}
+
+static int map_tboot_pages(unsigned long vaddr, unsigned long start_pfn,
+			   unsigned long nr)
+{
+	/* Reuse the original kernel mapping */
+	tboot_pg_dir = pgd_alloc(&tboot_mm);
+	if (!tboot_pg_dir)
+		return -1;
+
+	for (; nr > 0; nr--, vaddr += PAGE_SIZE, start_pfn++) {
+		if (map_tboot_page(vaddr, start_pfn, PAGE_KERNEL_EXEC))
+			return -1;
+	}
+
+	return 0;
+}
+
+static void tboot_create_trampoline(void)
+{
+	u32 map_base, map_size;
+
+	/* Create identity map for tboot shutdown code. */
+	map_base = PFN_DOWN(tboot->tboot_base);
+	map_size = PFN_UP(tboot->tboot_size);
+	if (map_tboot_pages(map_base << PAGE_SHIFT, map_base, map_size))
+		panic("tboot: Error mapping tboot pages (mfns) @ 0x%x, 0x%x\n",
+		      map_base, map_size);
 }
 
 #ifdef CONFIG_ACPI_SLEEP
@@ -167,6 +225,14 @@ void tboot_shutdown(u32 shutdown_type)
 	if (!tboot_enabled())
 		return;
 
+	/*
+	 * if we're being called before the 1:1 mapping is set up then just
+	 * return and let the normal shutdown happen; this should only be
+	 * due to very early panic()
+	 */
+	if (!tboot_pg_dir)
+		return;
+
 	/* if this is S3 then set regions to MAC */
 	if (shutdown_type == TB_SHUTDOWN_S3)
 		if (tboot_setup_sleep())
@@ -277,6 +343,8 @@ static __init int tboot_late_init(void)
 	if (!tboot_enabled())
 		return 0;
 
+	tboot_create_trampoline();
+
 	atomic_set(&ap_wfs_count, 0);
 	register_hotcpu_notifier(&tboot_cpu_notifier);
......
@@ -919,13 +919,11 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 
 	/*
 	 * On success we use clflush, when the CPU supports it to
-	 * avoid the wbindv. If the CPU does not support it, in the
-	 * error case, and during early boot (for EFI) we fall back
-	 * to cpa_flush_all (which uses wbinvd):
+	 * avoid the wbindv. If the CPU does not support it and in the
+	 * error case we fall back to cpa_flush_all (which uses
+	 * wbindv):
 	 */
-	if (early_boot_irqs_disabled)
-		__cpa_flush_all((void *)(long)cache);
-	else if (!ret && cpu_has_clflush) {
+	if (!ret && cpu_has_clflush) {
 		if (cpa.flags & (CPA_PAGES_ARRAY | CPA_ARRAY)) {
 			cpa_flush_array(addr, numpages, cache,
 					cpa.flags, pages);
......
@@ -239,7 +239,22 @@ static efi_status_t __init phys_efi_set_virtual_address_map(
 	return status;
 }
 
-static int efi_set_rtc_mmss(unsigned long nowtime)
+static efi_status_t __init phys_efi_get_time(efi_time_t *tm,
+					     efi_time_cap_t *tc)
+{
+	unsigned long flags;
+	efi_status_t status;
+
+	spin_lock_irqsave(&rtc_lock, flags);
+	efi_call_phys_prelog();
+	status = efi_call_phys2(efi_phys.get_time, virt_to_phys(tm),
+				virt_to_phys(tc));
+	efi_call_phys_epilog();
+	spin_unlock_irqrestore(&rtc_lock, flags);
+	return status;
+}
+
+int efi_set_rtc_mmss(unsigned long nowtime)
 {
 	int real_seconds, real_minutes;
 	efi_status_t status;
@@ -268,7 +283,7 @@ static int efi_set_rtc_mmss(unsigned long nowtime)
 	return 0;
 }
 
-static unsigned long efi_get_time(void)
+unsigned long efi_get_time(void)
 {
 	efi_status_t status;
 	efi_time_t eft;
@@ -624,13 +639,18 @@ static int __init efi_runtime_init(void)
 	}
 	/*
 	 * We will only need *early* access to the following
-	 * EFI runtime service before set_virtual_address_map
+	 * two EFI runtime services before set_virtual_address_map
 	 * is invoked.
 	 */
+	efi_phys.get_time = (efi_get_time_t *)runtime->get_time;
 	efi_phys.set_virtual_address_map =
 		(efi_set_virtual_address_map_t *)
 		runtime->set_virtual_address_map;
-
+	/*
+	 * Make efi_get_time can be called before entering
+	 * virtual mode.
+	 */
+	efi.get_time = phys_efi_get_time;
 	early_iounmap(runtime, sizeof(efi_runtime_services_t));
 
 	return 0;
@@ -716,10 +736,12 @@ void __init efi_init(void)
 		efi_enabled = 0;
 		return;
 	}
+#ifdef CONFIG_X86_32
 	if (efi_is_native()) {
 		x86_platform.get_wallclock = efi_get_time;
 		x86_platform.set_wallclock = efi_set_rtc_mmss;
 	}
+#endif
 
 #if EFI_DEBUG
 	print_efi_memmap();
......
@@ -58,21 +58,6 @@ static void __init early_code_mapping_set_exec(int executable)
 	}
 }
 
-unsigned long efi_call_virt_prelog(void)
-{
-	unsigned long saved;
-
-	saved = read_cr3();
-	write_cr3(real_mode_header->trampoline_pgd);
-
-	return saved;
-}
-
-void efi_call_virt_epilog(unsigned long saved)
-{
-	write_cr3(saved);
-}
-
 void __init efi_call_phys_prelog(void)
 {
 	unsigned long vaddress;
......
@@ -587,6 +587,8 @@ extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int __init efi_uart_console_only (void);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
 		struct resource *data_resource, struct resource *bss_resource);
+extern unsigned long efi_get_time(void);
+extern int efi_set_rtc_mmss(unsigned long nowtime);
 extern void efi_reserve_boot_services(void);
 extern struct efi_memory_map memmap;
......
@@ -463,10 +463,6 @@ static void __init mm_init(void)
 	percpu_init_late();
 	pgtable_cache_init();
 	vmalloc_init();
-#ifdef CONFIG_X86
-	if (efi_enabled)
-		efi_enter_virtual_mode();
-#endif
 }
 
 asmlinkage void __init start_kernel(void)
@@ -607,6 +603,10 @@ asmlinkage void __init start_kernel(void)
 	calibrate_delay();
 	pidmap_init();
 	anon_vma_init();
+#ifdef CONFIG_X86
+	if (efi_enabled)
+		efi_enter_virtual_mode();
+#endif
 	thread_info_cache_init();
 	cred_init();
 	fork_init(totalram_pages);
......