Commit 4f89fa28 authored by James Morse's avatar James Morse Committed by Rafael J. Wysocki

ACPI / APEI: Replace ioremap_page_range() with fixmap

Replace ghes_io{re,un}map_pfn_{nmi,irq}()'s use of ioremap_page_range()
with __set_fixmap(), as ioremap_page_range() may sleep to allocate a new
level of page table, even if it's passed an existing final address to
use in the mapping.

The GHES driver can only be enabled for architectures that select
HAVE_ACPI_APEI: Add fixmap entries to both x86 and arm64.

clear_fixmap() does the TLB invalidation in __set_fixmap() for arm64
and __set_pte_vaddr() for x86. In each case it's the same as the
respective arch_apei_flush_tlb_one().
Reported-by: Fengguang Wu <fengguang.wu@intel.com>
Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
Signed-off-by: James Morse <james.morse@arm.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Tested-by: Tyler Baicar <tbaicar@codeaurora.org>
Tested-by: Toshi Kani <toshi.kani@hpe.com>
[ For the arm64 bits: ]
Acked-by: Will Deacon <will.deacon@arm.com>
[ For the x86 bits: ]
Acked-by: Ingo Molnar <mingo@kernel.org>
Signed-off-by: Rafael J. Wysocki <rafael.j.wysocki@intel.com>
Cc: All applicable <stable@vger.kernel.org>
parent c49870e8
...@@ -51,6 +51,13 @@ enum fixed_addresses { ...@@ -51,6 +51,13 @@ enum fixed_addresses {
FIX_EARLYCON_MEM_BASE, FIX_EARLYCON_MEM_BASE,
FIX_TEXT_POKE0, FIX_TEXT_POKE0,
#ifdef CONFIG_ACPI_APEI_GHES
/* Used for GHES mapping from assorted contexts */
FIX_APEI_GHES_IRQ,
FIX_APEI_GHES_NMI,
#endif /* CONFIG_ACPI_APEI_GHES */
__end_of_permanent_fixed_addresses, __end_of_permanent_fixed_addresses,
/* /*
......
...@@ -104,6 +104,12 @@ enum fixed_addresses { ...@@ -104,6 +104,12 @@ enum fixed_addresses {
FIX_GDT_REMAP_BEGIN, FIX_GDT_REMAP_BEGIN,
FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1, FIX_GDT_REMAP_END = FIX_GDT_REMAP_BEGIN + NR_CPUS - 1,
#ifdef CONFIG_ACPI_APEI_GHES
/* Used for GHES mapping from assorted contexts */
FIX_APEI_GHES_IRQ,
FIX_APEI_GHES_NMI,
#endif
__end_of_permanent_fixed_addresses, __end_of_permanent_fixed_addresses,
/* /*
......
...@@ -51,6 +51,7 @@ ...@@ -51,6 +51,7 @@
#include <acpi/actbl1.h> #include <acpi/actbl1.h>
#include <acpi/ghes.h> #include <acpi/ghes.h>
#include <acpi/apei.h> #include <acpi/apei.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <ras/ras_event.h> #include <ras/ras_event.h>
...@@ -112,7 +113,7 @@ static DEFINE_MUTEX(ghes_list_mutex); ...@@ -112,7 +113,7 @@ static DEFINE_MUTEX(ghes_list_mutex);
* Because the memory area used to transfer hardware error information * Because the memory area used to transfer hardware error information
* from BIOS to Linux can be determined only in NMI, IRQ or timer * from BIOS to Linux can be determined only in NMI, IRQ or timer
* handler, but general ioremap can not be used in atomic context, so * handler, but general ioremap can not be used in atomic context, so
* a special version of atomic ioremap is implemented for that. * the fixmap is used instead.
*/ */
/* /*
...@@ -126,8 +127,8 @@ static DEFINE_MUTEX(ghes_list_mutex); ...@@ -126,8 +127,8 @@ static DEFINE_MUTEX(ghes_list_mutex);
/* virtual memory area for atomic ioremap */ /* virtual memory area for atomic ioremap */
static struct vm_struct *ghes_ioremap_area; static struct vm_struct *ghes_ioremap_area;
/* /*
* These 2 spinlock is used to prevent atomic ioremap virtual memory * These 2 spinlocks are used to prevent the fixmap entries from being used
* area from being mapped simultaneously. * simultaneously.
*/ */
static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi); static DEFINE_RAW_SPINLOCK(ghes_ioremap_lock_nmi);
static DEFINE_SPINLOCK(ghes_ioremap_lock_irq); static DEFINE_SPINLOCK(ghes_ioremap_lock_irq);
...@@ -159,53 +160,36 @@ static void ghes_ioremap_exit(void) ...@@ -159,53 +160,36 @@ static void ghes_ioremap_exit(void)
static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn) static void __iomem *ghes_ioremap_pfn_nmi(u64 pfn)
{ {
unsigned long vaddr;
phys_addr_t paddr; phys_addr_t paddr;
pgprot_t prot; pgprot_t prot;
vaddr = (unsigned long)GHES_IOREMAP_NMI_PAGE(ghes_ioremap_area->addr);
paddr = pfn << PAGE_SHIFT; paddr = pfn << PAGE_SHIFT;
prot = arch_apei_get_mem_attribute(paddr); prot = arch_apei_get_mem_attribute(paddr);
ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); __set_fixmap(FIX_APEI_GHES_NMI, paddr, prot);
return (void __iomem *)vaddr; return (void __iomem *) fix_to_virt(FIX_APEI_GHES_NMI);
} }
static void __iomem *ghes_ioremap_pfn_irq(u64 pfn) static void __iomem *ghes_ioremap_pfn_irq(u64 pfn)
{ {
unsigned long vaddr;
phys_addr_t paddr; phys_addr_t paddr;
pgprot_t prot; pgprot_t prot;
vaddr = (unsigned long)GHES_IOREMAP_IRQ_PAGE(ghes_ioremap_area->addr);
paddr = pfn << PAGE_SHIFT; paddr = pfn << PAGE_SHIFT;
prot = arch_apei_get_mem_attribute(paddr); prot = arch_apei_get_mem_attribute(paddr);
__set_fixmap(FIX_APEI_GHES_IRQ, paddr, prot);
ioremap_page_range(vaddr, vaddr + PAGE_SIZE, paddr, prot); return (void __iomem *) fix_to_virt(FIX_APEI_GHES_IRQ);
return (void __iomem *)vaddr;
} }
static void ghes_iounmap_nmi(void __iomem *vaddr_ptr) static void ghes_iounmap_nmi(void)
{ {
unsigned long vaddr = (unsigned long __force)vaddr_ptr; clear_fixmap(FIX_APEI_GHES_NMI);
void *base = ghes_ioremap_area->addr;
BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_NMI_PAGE(base));
unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
arch_apei_flush_tlb_one(vaddr);
} }
static void ghes_iounmap_irq(void __iomem *vaddr_ptr) static void ghes_iounmap_irq(void)
{ {
unsigned long vaddr = (unsigned long __force)vaddr_ptr; clear_fixmap(FIX_APEI_GHES_IRQ);
void *base = ghes_ioremap_area->addr;
BUG_ON(vaddr != (unsigned long)GHES_IOREMAP_IRQ_PAGE(base));
unmap_kernel_range_noflush(vaddr, PAGE_SIZE);
arch_apei_flush_tlb_one(vaddr);
} }
static int ghes_estatus_pool_init(void) static int ghes_estatus_pool_init(void)
...@@ -361,10 +345,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len, ...@@ -361,10 +345,10 @@ static void ghes_copy_tofrom_phys(void *buffer, u64 paddr, u32 len,
paddr += trunk; paddr += trunk;
buffer += trunk; buffer += trunk;
if (in_nmi) { if (in_nmi) {
ghes_iounmap_nmi(vaddr); ghes_iounmap_nmi();
raw_spin_unlock(&ghes_ioremap_lock_nmi); raw_spin_unlock(&ghes_ioremap_lock_nmi);
} else { } else {
ghes_iounmap_irq(vaddr); ghes_iounmap_irq();
spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags); spin_unlock_irqrestore(&ghes_ioremap_lock_irq, flags);
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment