Commit c13066e5 authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/nohash: Remove CONFIG_SMP #ifdefery in mmu_context.h

Everything can be done even when CONFIG_SMP is not selected.

Just use IS_ENABLED() where relevant and rely on GCC to
optimise out unneeded code and variables when CONFIG_SMP is not set.
Signed-off-by: Christophe Leroy <christophe.leroy@csgroup.eu>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
Link: https://lore.kernel.org/r/cc13b87b0f750a538621876ecc24c22a07e7c8be.1622712515.git.christophe.leroy@csgroup.eu
parent a56ab7c7
...@@ -47,6 +47,7 @@ ...@@ -47,6 +47,7 @@
#include <asm/mmu_context.h> #include <asm/mmu_context.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/smp.h>
#include <mm/mmu_decl.h> #include <mm/mmu_decl.h>
...@@ -86,9 +87,7 @@ void *abatron_pteptrs[2]; ...@@ -86,9 +87,7 @@ void *abatron_pteptrs[2];
static unsigned int next_context, nr_free_contexts; static unsigned int next_context, nr_free_contexts;
static unsigned long *context_map; static unsigned long *context_map;
#ifdef CONFIG_SMP
static unsigned long *stale_map[NR_CPUS]; static unsigned long *stale_map[NR_CPUS];
#endif
static struct mm_struct **context_mm; static struct mm_struct **context_mm;
static DEFINE_RAW_SPINLOCK(context_lock); static DEFINE_RAW_SPINLOCK(context_lock);
...@@ -111,7 +110,6 @@ static DEFINE_RAW_SPINLOCK(context_lock); ...@@ -111,7 +110,6 @@ static DEFINE_RAW_SPINLOCK(context_lock);
* the stale map as we can just flush the local CPU * the stale map as we can just flush the local CPU
* -- benh * -- benh
*/ */
#ifdef CONFIG_SMP
static unsigned int steal_context_smp(unsigned int id) static unsigned int steal_context_smp(unsigned int id)
{ {
struct mm_struct *mm; struct mm_struct *mm;
...@@ -164,14 +162,11 @@ static unsigned int steal_context_smp(unsigned int id) ...@@ -164,14 +162,11 @@ static unsigned int steal_context_smp(unsigned int id)
/* This will cause the caller to try again */ /* This will cause the caller to try again */
return MMU_NO_CONTEXT; return MMU_NO_CONTEXT;
} }
#endif /* CONFIG_SMP */
static unsigned int steal_all_contexts(void) static unsigned int steal_all_contexts(void)
{ {
struct mm_struct *mm; struct mm_struct *mm;
#ifdef CONFIG_SMP
int cpu = smp_processor_id(); int cpu = smp_processor_id();
#endif
unsigned int id; unsigned int id;
for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) { for (id = FIRST_CONTEXT; id <= LAST_CONTEXT; id++) {
...@@ -189,9 +184,8 @@ static unsigned int steal_all_contexts(void) ...@@ -189,9 +184,8 @@ static unsigned int steal_all_contexts(void)
mm->context.active = 0; mm->context.active = 0;
#endif #endif
} }
#ifdef CONFIG_SMP if (IS_ENABLED(CONFIG_SMP))
__clear_bit(id, stale_map[cpu]); __clear_bit(id, stale_map[cpu]);
#endif
} }
/* Flush the TLB for all contexts (not to be used on SMP) */ /* Flush the TLB for all contexts (not to be used on SMP) */
...@@ -210,9 +204,7 @@ static unsigned int steal_all_contexts(void) ...@@ -210,9 +204,7 @@ static unsigned int steal_all_contexts(void)
static unsigned int steal_context_up(unsigned int id) static unsigned int steal_context_up(unsigned int id)
{ {
struct mm_struct *mm; struct mm_struct *mm;
#ifdef CONFIG_SMP
int cpu = smp_processor_id(); int cpu = smp_processor_id();
#endif
/* Pick up the victim mm */ /* Pick up the victim mm */
mm = context_mm[id]; mm = context_mm[id];
...@@ -226,9 +218,8 @@ static unsigned int steal_context_up(unsigned int id) ...@@ -226,9 +218,8 @@ static unsigned int steal_context_up(unsigned int id)
mm->context.id = MMU_NO_CONTEXT; mm->context.id = MMU_NO_CONTEXT;
/* XXX This clear should ultimately be part of local_flush_tlb_mm */ /* XXX This clear should ultimately be part of local_flush_tlb_mm */
#ifdef CONFIG_SMP if (IS_ENABLED(CONFIG_SMP))
__clear_bit(id, stale_map[cpu]); __clear_bit(id, stale_map[cpu]);
#endif
return id; return id;
} }
...@@ -295,9 +286,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, ...@@ -295,9 +286,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
struct task_struct *tsk) struct task_struct *tsk)
{ {
unsigned int id; unsigned int id;
#ifdef CONFIG_SMP
unsigned int i, cpu = smp_processor_id(); unsigned int i, cpu = smp_processor_id();
#endif
unsigned long *map; unsigned long *map;
/* No lockless fast path .. yet */ /* No lockless fast path .. yet */
...@@ -306,17 +295,17 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, ...@@ -306,17 +295,17 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
pr_hard("[%d] activating context for mm @%p, active=%d, id=%d", pr_hard("[%d] activating context for mm @%p, active=%d, id=%d",
cpu, next, next->context.active, next->context.id); cpu, next, next->context.active, next->context.id);
#ifdef CONFIG_SMP if (IS_ENABLED(CONFIG_SMP)) {
/* Mark us active and the previous one not anymore */ /* Mark us active and the previous one not anymore */
next->context.active++; next->context.active++;
if (prev) { if (prev) {
pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active); pr_hardcont(" (old=0x%p a=%d)", prev, prev->context.active);
WARN_ON(prev->context.active < 1); WARN_ON(prev->context.active < 1);
prev->context.active--; prev->context.active--;
}
} }
again: again:
#endif /* CONFIG_SMP */
/* If we already have a valid assigned context, skip all that */ /* If we already have a valid assigned context, skip all that */
id = next->context.id; id = next->context.id;
...@@ -337,14 +326,12 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, ...@@ -337,14 +326,12 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
/* No more free contexts, let's try to steal one */ /* No more free contexts, let's try to steal one */
if (nr_free_contexts == 0) { if (nr_free_contexts == 0) {
#ifdef CONFIG_SMP
if (num_online_cpus() > 1) { if (num_online_cpus() > 1) {
id = steal_context_smp(id); id = steal_context_smp(id);
if (id == MMU_NO_CONTEXT) if (id == MMU_NO_CONTEXT)
goto again; goto again;
goto stolen; goto stolen;
} }
#endif /* CONFIG_SMP */
if (IS_ENABLED(CONFIG_PPC_8xx)) if (IS_ENABLED(CONFIG_PPC_8xx))
id = steal_all_contexts(); id = steal_all_contexts();
else else
...@@ -371,8 +358,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, ...@@ -371,8 +358,7 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
/* If that context got marked stale on this CPU, then flush the /* If that context got marked stale on this CPU, then flush the
* local TLB for it and unmark it before we use it * local TLB for it and unmark it before we use it
*/ */
#ifdef CONFIG_SMP if (IS_ENABLED(CONFIG_SMP) && test_bit(id, stale_map[cpu])) {
if (test_bit(id, stale_map[cpu])) {
pr_hardcont(" | stale flush %d [%d..%d]", pr_hardcont(" | stale flush %d [%d..%d]",
id, cpu_first_thread_sibling(cpu), id, cpu_first_thread_sibling(cpu),
cpu_last_thread_sibling(cpu)); cpu_last_thread_sibling(cpu));
...@@ -386,7 +372,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next, ...@@ -386,7 +372,6 @@ void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next,
__clear_bit(id, stale_map[i]); __clear_bit(id, stale_map[i]);
} }
} }
#endif
/* Flick the MMU and release lock */ /* Flick the MMU and release lock */
pr_hardcont(" -> %d\n", id); pr_hardcont(" -> %d\n", id);
...@@ -445,7 +430,6 @@ void destroy_context(struct mm_struct *mm) ...@@ -445,7 +430,6 @@ void destroy_context(struct mm_struct *mm)
raw_spin_unlock_irqrestore(&context_lock, flags); raw_spin_unlock_irqrestore(&context_lock, flags);
} }
#ifdef CONFIG_SMP
static int mmu_ctx_cpu_prepare(unsigned int cpu) static int mmu_ctx_cpu_prepare(unsigned int cpu)
{ {
/* We don't touch CPU 0 map, it's allocated at aboot and kept /* We don't touch CPU 0 map, it's allocated at aboot and kept
...@@ -475,8 +459,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu) ...@@ -475,8 +459,6 @@ static int mmu_ctx_cpu_dead(unsigned int cpu)
return 0; return 0;
} }
#endif /* CONFIG_SMP */
/* /*
* Initialize the context management stuff. * Initialize the context management stuff.
*/ */
...@@ -500,16 +482,16 @@ void __init mmu_context_init(void) ...@@ -500,16 +482,16 @@ void __init mmu_context_init(void)
if (!context_mm) if (!context_mm)
panic("%s: Failed to allocate %zu bytes\n", __func__, panic("%s: Failed to allocate %zu bytes\n", __func__,
sizeof(void *) * (LAST_CONTEXT + 1)); sizeof(void *) * (LAST_CONTEXT + 1));
#ifdef CONFIG_SMP if (IS_ENABLED(CONFIG_SMP)) {
stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES); stale_map[boot_cpuid] = memblock_alloc(CTX_MAP_SIZE, SMP_CACHE_BYTES);
if (!stale_map[boot_cpuid]) if (!stale_map[boot_cpuid])
panic("%s: Failed to allocate %zu bytes\n", __func__, panic("%s: Failed to allocate %zu bytes\n", __func__,
CTX_MAP_SIZE); CTX_MAP_SIZE);
cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE, cpuhp_setup_state_nocalls(CPUHP_POWERPC_MMU_CTX_PREPARE,
"powerpc/mmu/ctx:prepare", "powerpc/mmu/ctx:prepare",
mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead); mmu_ctx_cpu_prepare, mmu_ctx_cpu_dead);
#endif }
printk(KERN_INFO printk(KERN_INFO
"MMU: Allocated %zu bytes of context maps for %d contexts\n", "MMU: Allocated %zu bytes of context maps for %d contexts\n",
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment