Commit 8e0b634b authored by Nicholas Piggin, committed by Michael Ellerman

powerpc/64s: Do not allocate lppaca if we are not virtualized

The "lppaca" is a structure registered with the hypervisor. This is
unnecessary when running on non-virtualised platforms. One field from
the lppaca (pmcregs_in_use) is also used by the host, so move the host
part out into the paca (lppaca field is still updated in
guest mode).
Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
[mpe: Fix non-pseries build with some #ifdefs]
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 0834d627
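
As a rough illustration of the approach described in the commit message (not part of the commit itself): under a hypervisor (an LPAR) the PMU-in-use flag has to live in the hypervisor-visible lppaca, while on bare metal no lppaca exists at all and the flag can live in the kernel's own paca. The sketch below is a standalone mock; its structs, the running_under_hypervisor flag and the set_pmu_inuse() helper are simplified stand-ins invented for illustration, not the kernel's definitions. The real logic is the ppc_set_pmu_inuse() hunk further down in the diff.

/*
 * Standalone mock (not kernel code) of the lppaca/paca split for the
 * PMU-in-use flag. Compile with any C compiler.
 */
#include <stdbool.h>
#include <stdio.h>

struct lppaca {
	unsigned char pmcregs_in_use;	/* field shared with the hypervisor */
};

struct paca_struct {
	struct lppaca *lppaca_ptr;	/* NULL when not virtualized */
	unsigned char pmcregs_in_use;	/* host-only copy (bare-metal case) */
};

/* Stand-in for a firmware_has_feature(FW_FEATURE_LPAR)-style check. */
static bool running_under_hypervisor;

static struct lppaca guest_lppaca;
static struct paca_struct paca;

static void set_pmu_inuse(int inuse)
{
	if (running_under_hypervisor) {
		/* Virtualized: the hypervisor reads this field. */
		paca.lppaca_ptr->pmcregs_in_use = inuse;
	} else {
		/* Bare metal: no lppaca is ever allocated or touched. */
		paca.pmcregs_in_use = inuse;
	}
}

int main(void)
{
	/* Bare-metal case: lppaca_ptr stays NULL and is never dereferenced. */
	running_under_hypervisor = false;
	paca.lppaca_ptr = NULL;
	set_pmu_inuse(1);
	printf("bare metal: paca.pmcregs_in_use = %d\n", paca.pmcregs_in_use);

	/* Virtualized case: the flag goes to the registered lppaca. */
	running_under_hypervisor = true;
	paca.lppaca_ptr = &guest_lppaca;
	set_pmu_inuse(1);
	printf("LPAR: lppaca.pmcregs_in_use = %d\n", guest_lppaca.pmcregs_in_use);
	return 0;
}
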
@@ -46,7 +46,10 @@ extern unsigned int debug_smp_processor_id(void); /* from linux/smp.h */
#define get_paca() local_paca
#endif
#ifdef CONFIG_PPC_PSERIES
#define get_lppaca() (get_paca()->lppaca_ptr)
#endif
#define get_slb_shadow() (get_paca()->slb_shadow_ptr)
struct task_struct;
@@ -58,7 +61,7 @@ struct task_struct;
* processor.
*/
struct paca_struct {
#ifdef CONFIG_PPC_BOOK3S
#ifdef CONFIG_PPC_PSERIES
/*
* Because hw_cpu_id, unlike other paca fields, is accessed
* routinely from other CPUs (from the IRQ code), we stick to
@@ -67,7 +70,8 @@ struct paca_struct {
*/
struct lppaca *lppaca_ptr; /* Pointer to LpPaca for PLIC */
#endif /* CONFIG_PPC_BOOK3S */
#endif /* CONFIG_PPC_PSERIES */
/*
* MAGIC: the spinlock functions in arch/powerpc/lib/locks.c
* load lock_token and paca_index with a single lwz
@@ -160,10 +164,14 @@ struct paca_struct {
u64 saved_msr; /* MSR saved here by enter_rtas */
u16 trap_save; /* Used when bad stack is encountered */
u8 irq_soft_mask; /* mask for irq soft masking */
u8 soft_enabled; /* irq soft-enable flag */
u8 irq_happened; /* irq happened while soft-disabled */
u8 io_sync; /* writel() needs spin_unlock sync */
u8 irq_work_pending; /* IRQ_WORK interrupt while soft-disable */
u8 nap_state_lost; /* NV GPR values lost in power7_idle */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
u8 pmcregs_in_use; /* pseries puts this in lppaca */
#endif
u64 sprg_vdso; /* Saved user-visible sprg */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
u64 tm_scratch; /* TM scratch area for reclaim */
@@ -2,6 +2,8 @@
#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
#define _ASM_POWERPC_PLPAR_WRAPPERS_H
#ifdef CONFIG_PPC_PSERIES
#include <linux/string.h>
#include <linux/irqflags.h>
@@ -340,4 +342,6 @@ static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
return rc;
}
#endif /* CONFIG_PPC_PSERIES */
#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */
@@ -31,10 +31,21 @@ void ppc_enable_pmcs(void);
#ifdef CONFIG_PPC_BOOK3S_64
#include <asm/lppaca.h>
#include <asm/firmware.h>
static inline void ppc_set_pmu_inuse(int inuse)
{
get_lppaca()->pmcregs_in_use = inuse;
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE)
if (firmware_has_feature(FW_FEATURE_LPAR)) {
#ifdef CONFIG_PPC_PSERIES
get_lppaca()->pmcregs_in_use = inuse;
#endif
} else {
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
get_paca()->pmcregs_in_use = inuse;
#endif
}
#endif
}
extern void power4_enable_pmcs(void);
@@ -221,12 +221,17 @@ int main(void)
OFFSET(PACA_EXMC, paca_struct, exmc);
OFFSET(PACA_EXSLB, paca_struct, exslb);
OFFSET(PACA_EXNMI, paca_struct, exnmi);
#ifdef CONFIG_PPC_PSERIES
OFFSET(PACALPPACAPTR, paca_struct, lppaca_ptr);
#endif
OFFSET(PACA_SLBSHADOWPTR, paca_struct, slb_shadow_ptr);
OFFSET(SLBSHADOW_STACKVSID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].vsid);
OFFSET(SLBSHADOW_STACKESID, slb_shadow, save_area[SLB_NUM_BOLTED - 1].esid);
OFFSET(SLBSHADOW_SAVEAREA, slb_shadow, save_area);
OFFSET(LPPACA_PMCINUSE, lppaca, pmcregs_in_use);
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
OFFSET(PACA_PMCINUSE, paca_struct, pmcregs_in_use);
#endif
OFFSET(LPPACA_DTLIDX, lppaca, dtl_idx);
OFFSET(LPPACA_YIELDCOUNT, lppaca, yield_count);
OFFSET(PACA_DTL_RIDX, paca_struct, dtl_ridx);
@@ -20,7 +20,7 @@
#include "setup.h"
#ifdef CONFIG_PPC_BOOK3S
#ifdef CONFIG_PPC_PSERIES
/*
* The structure which the hypervisor knows about - this structure
@@ -47,6 +47,9 @@ static long __initdata lppaca_size;
static void __init allocate_lppacas(int nr_cpus, unsigned long limit)
{
if (early_cpu_has_feature(CPU_FTR_HVMODE))
return;
if (nr_cpus <= NR_LPPACAS)
return;
@@ -60,6 +63,9 @@ static struct lppaca * __init new_lppaca(int cpu)
{
struct lppaca *lp;
if (early_cpu_has_feature(CPU_FTR_HVMODE))
return NULL;
if (cpu < NR_LPPACAS)
return &lppaca[cpu];
@@ -73,6 +79,9 @@ static void __init free_lppacas(void)
{
long new_size = 0, nr;
if (early_cpu_has_feature(CPU_FTR_HVMODE))
return;
if (!lppaca_size)
return;
nr = num_possible_cpus() - NR_LPPACAS;
@@ -157,9 +166,10 @@ EXPORT_SYMBOL(paca);
void __init initialise_paca(struct paca_struct *new_paca, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S
#ifdef CONFIG_PPC_PSERIES
new_paca->lppaca_ptr = new_lppaca(cpu);
#else
#endif
#ifdef CONFIG_PPC_BOOK3E
new_paca->kernel_pgd = swapper_pg_dir;
#endif
new_paca->lock_token = 0x8000;
@@ -79,8 +79,7 @@ END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
li r5, 0
mtspr SPRN_MMCRA, r5
isync
ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
lbz r5, LPPACA_PMCINUSE(r3)
lbz r5, PACA_PMCINUSE(r13) /* is the host using the PMU? */
cmpwi r5, 0
beq 31f /* skip if not */
mfspr r5, SPRN_MMCR1
@@ -113,8 +113,7 @@ END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
mtspr SPRN_SPRG_VDSO_WRITE,r3
/* Reload the host's PMU registers */
ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
lbz r4, LPPACA_PMCINUSE(r3)
lbz r4, PACA_PMCINUSE(r13) /* is the host using the PMU? */
cmpwi r4, 0
beq 23f /* skip if not */
BEGIN_FTR_SECTION