Commit bd104e6d authored by Anshuman Khandual's avatar Anshuman Khandual Committed by Michael Ellerman

powerpc/pseries/svm: Use shared memory for LPPACA structures

LPPACA structures need to be shared with the host. Hence they need to be in
shared memory. Instead of allocating individual chunks of memory for a
given structure from memblock, a contiguous chunk of memory is allocated
and then converted into shared memory. Subsequent allocation requests will
come from the contiguous chunk which will be always shared memory for all
structures.

While we are able to use a kmem_cache constructor for the Debug Trace Log,
LPPACAs are allocated very early in the boot process (before SLUB is
available) so we need to use a simpler scheme here.

Introduce helper is_secure_guest() which uses the S bit of the MSR to tell
whether we're running as a secure guest.
Signed-off-by: Anshuman Khandual <khandual@linux.vnet.ibm.com>
Signed-off-by: Thiago Jung Bauermann <bauerman@linux.ibm.com>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/20190820021326.6884-9-bauerman@linux.ibm.com
parent e311a92d
/* SPDX-License-Identifier: GPL-2.0+ */
/*
* SVM helper functions
*
* Copyright 2018 Anshuman Khandual, IBM Corporation.
*/
#ifndef _ASM_POWERPC_SVM_H
#define _ASM_POWERPC_SVM_H

#ifdef CONFIG_PPC_SVM
/*
 * A secure guest runs with the MSR "S" bit set, so reading the MSR is
 * enough to detect that we are under the ultravisor.
 */
static inline bool is_secure_guest(void)
{
	return (mfmsr() & MSR_S) != 0;
}
#else /* CONFIG_PPC_SVM */
/* Without SVM support built in, this kernel can never be a secure guest. */
static inline bool is_secure_guest(void)
{
	return false;
}
#endif /* CONFIG_PPC_SVM */
#endif /* _ASM_POWERPC_SVM_H */
...@@ -14,6 +14,8 @@ ...@@ -14,6 +14,8 @@
#include <asm/sections.h> #include <asm/sections.h>
#include <asm/pgtable.h> #include <asm/pgtable.h>
#include <asm/kexec.h> #include <asm/kexec.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
#include "setup.h" #include "setup.h"
...@@ -54,6 +56,41 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align, ...@@ -54,6 +56,41 @@ static void *__init alloc_paca_data(unsigned long size, unsigned long align,
#define LPPACA_SIZE 0x400 #define LPPACA_SIZE 0x400
/*
 * Allocate an LPPACA for a secure guest out of one contiguous,
 * page-aligned chunk that is shared with the hypervisor via the
 * ultravisor. Sharing a single chunk once is cheaper than sharing each
 * LPPACA's page individually, and this runs too early in boot to use a
 * kmem_cache.
 *
 * @size:  bytes to carve out for this LPPACA (LPPACA_SIZE).
 * @align: requested alignment; the chunk itself is PAGE_SIZE aligned and
 *         carved in LPPACA_SIZE steps, so this is not used directly.
 * @limit: upper address limit passed to memblock.
 * @cpu:   CPU this LPPACA is for (unused; kept for symmetry with
 *         alloc_paca_data()).
 */
static void *__init alloc_shared_lppaca(unsigned long size, unsigned long align,
					unsigned long limit, int cpu)
{
	size_t shared_lppaca_total_size = PAGE_ALIGN(nr_cpu_ids * LPPACA_SIZE);
	static unsigned long shared_lppaca_size;
	static void *shared_lppaca;
	void *ptr;

	if (!shared_lppaca) {
		/*
		 * Allocate bottom-up so the chunk lands in low memory;
		 * restore the default direction afterwards.
		 */
		memblock_set_bottom_up(true);

		shared_lppaca =
			memblock_alloc_try_nid(shared_lppaca_total_size,
					       PAGE_SIZE, MEMBLOCK_LOW_LIMIT,
					       limit, NUMA_NO_NODE);
		if (!shared_lppaca)
			panic("cannot allocate shared data");

		memblock_set_bottom_up(false);

		/* Share the whole chunk with the hypervisor, page by page. */
		uv_share_page(PHYS_PFN(__pa(shared_lppaca)),
			      shared_lppaca_total_size >> PAGE_SHIFT);
	}

	ptr = shared_lppaca + shared_lppaca_size;
	shared_lppaca_size += size;

	/*
	 * This is very early in boot, so no harm done if the kernel crashes at
	 * this point.
	 *
	 * Note ">" rather than ">=": after handing out the last LPPACA the
	 * running total legitimately equals the chunk size (allocations cover
	 * [0, total)), which must not trip the BUG.
	 */
	BUG_ON(shared_lppaca_size > shared_lppaca_total_size);

	return ptr;
}
/* /*
* See asm/lppaca.h for more detail. * See asm/lppaca.h for more detail.
* *
...@@ -83,7 +120,11 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit) ...@@ -83,7 +120,11 @@ static struct lppaca * __init new_lppaca(int cpu, unsigned long limit)
if (early_cpu_has_feature(CPU_FTR_HVMODE)) if (early_cpu_has_feature(CPU_FTR_HVMODE))
return NULL; return NULL;
if (is_secure_guest())
lp = alloc_shared_lppaca(LPPACA_SIZE, 0x400, limit, cpu);
else
lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu); lp = alloc_paca_data(LPPACA_SIZE, 0x400, limit, cpu);
init_lppaca(lp); init_lppaca(lp);
return lp; return lp;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment