powerpc: Shield code specific to 64-bit server processors

This is a random collection of added ifdef's around portions of
code that only make sense on server processors. Using either
CONFIG_PPC_STD_MMU_64 or CONFIG_PPC_BOOK3S as seems appropriate.

This is meant to make the future merging of Book3E 64-bit support
easier.
Signed-off-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
parent 91c60b5b
...@@ -20,6 +20,11 @@ ...@@ -20,6 +20,11 @@
#define _ASM_POWERPC_LPPACA_H #define _ASM_POWERPC_LPPACA_H
#ifdef __KERNEL__ #ifdef __KERNEL__
/* These definitions relate to hypervisors that only exist when using
* a server type processor
*/
#ifdef CONFIG_PPC_BOOK3S
//============================================================================= //=============================================================================
// //
// This control block contains the data that is shared between the // This control block contains the data that is shared between the
...@@ -158,5 +163,6 @@ struct slb_shadow { ...@@ -158,5 +163,6 @@ struct slb_shadow {
extern struct slb_shadow slb_shadow[]; extern struct slb_shadow slb_shadow[];
#endif /* CONFIG_PPC_BOOK3S */
#endif /* __KERNEL__ */ #endif /* __KERNEL__ */
#endif /* _ASM_POWERPC_LPPACA_H */ #endif /* _ASM_POWERPC_LPPACA_H */
...@@ -74,10 +74,10 @@ extern void early_init_mmu_secondary(void); ...@@ -74,10 +74,10 @@ extern void early_init_mmu_secondary(void);
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
#ifdef CONFIG_PPC64 #if defined(CONFIG_PPC_STD_MMU_64)
/* 64-bit classic hash table MMU */ /* 64-bit classic hash table MMU */
# include <asm/mmu-hash64.h> # include <asm/mmu-hash64.h>
#elif defined(CONFIG_PPC_STD_MMU) #elif defined(CONFIG_PPC_STD_MMU_32)
/* 32-bit classic hash table MMU */ /* 32-bit classic hash table MMU */
# include <asm/mmu-hash32.h> # include <asm/mmu-hash32.h>
#elif defined(CONFIG_40x) #elif defined(CONFIG_40x)
......
...@@ -31,9 +31,11 @@ ...@@ -31,9 +31,11 @@
#error TASK_SIZE_USER64 exceeds pagetable range #error TASK_SIZE_USER64 exceeds pagetable range
#endif #endif
#ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) #if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range #error TASK_SIZE_USER64 exceeds user VSID range
#endif #endif
#endif
/* /*
* Define the address range of the vmalloc VM area. * Define the address range of the vmalloc VM area.
...@@ -199,8 +201,11 @@ static inline unsigned long pte_update(struct mm_struct *mm, ...@@ -199,8 +201,11 @@ static inline unsigned long pte_update(struct mm_struct *mm,
if (!huge) if (!huge)
assert_pte_locked(mm, addr); assert_pte_locked(mm, addr);
#ifdef CONFIG_PPC_STD_MMU_64
if (old & _PAGE_HASHPTE) if (old & _PAGE_HASHPTE)
hpte_need_flush(mm, addr, ptep, old, huge); hpte_need_flush(mm, addr, ptep, old, huge);
#endif
return old; return old;
} }
......
...@@ -117,6 +117,7 @@ notrace void raw_local_irq_restore(unsigned long en) ...@@ -117,6 +117,7 @@ notrace void raw_local_irq_restore(unsigned long en)
if (!en) if (!en)
return; return;
#ifdef CONFIG_PPC_STD_MMU_64
if (firmware_has_feature(FW_FEATURE_ISERIES)) { if (firmware_has_feature(FW_FEATURE_ISERIES)) {
/* /*
* Do we need to disable preemption here? Not really: in the * Do we need to disable preemption here? Not really: in the
...@@ -134,6 +135,7 @@ notrace void raw_local_irq_restore(unsigned long en) ...@@ -134,6 +135,7 @@ notrace void raw_local_irq_restore(unsigned long en)
if (local_paca->lppaca_ptr->int_dword.any_int) if (local_paca->lppaca_ptr->int_dword.any_int)
iseries_handle_interrupts(); iseries_handle_interrupts();
} }
#endif /* CONFIG_PPC_STD_MMU_64 */
/* /*
* if (get_paca()->hard_enabled) return; * if (get_paca()->hard_enabled) return;
......
...@@ -420,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus) ...@@ -420,6 +420,9 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
* so flushing the hash table is the only sane way to make sure * so flushing the hash table is the only sane way to make sure
* that no hash entries are covering that removed bridge area * that no hash entries are covering that removed bridge area
* while still allowing other busses overlapping those pages * while still allowing other busses overlapping those pages
*
* Note: If we ever support P2P hotplug on Book3E, we'll have
* to do an appropriate TLB flush here too
*/ */
if (bus->self) { if (bus->self) {
struct resource *res = bus->resource[0]; struct resource *res = bus->resource[0];
...@@ -427,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bus *bus) ...@@ -427,8 +430,10 @@ int pcibios_unmap_io_space(struct pci_bus *bus)
pr_debug("IO unmapping for PCI-PCI bridge %s\n", pr_debug("IO unmapping for PCI-PCI bridge %s\n",
pci_name(bus->self)); pci_name(bus->self));
#ifdef CONFIG_PPC_STD_MMU_64
__flush_hash_table_range(&init_mm, res->start + _IO_BASE, __flush_hash_table_range(&init_mm, res->start + _IO_BASE,
res->end + _IO_BASE + 1); res->end + _IO_BASE + 1);
#endif
return 0; return 0;
} }
......
...@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -650,7 +650,7 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
p->thread.ksp_limit = (unsigned long)task_stack_page(p) + p->thread.ksp_limit = (unsigned long)task_stack_page(p) +
_ALIGN_UP(sizeof(struct thread_info), 16); _ALIGN_UP(sizeof(struct thread_info), 16);
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC_STD_MMU_64
if (cpu_has_feature(CPU_FTR_SLB)) { if (cpu_has_feature(CPU_FTR_SLB)) {
unsigned long sp_vsid; unsigned long sp_vsid;
unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp; unsigned long llp = mmu_psize_defs[mmu_linear_psize].sllp;
......
...@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features(unsigned long node) ...@@ -585,7 +585,7 @@ static void __init check_cpu_pa_features(unsigned long node)
ibm_pa_features, ARRAY_SIZE(ibm_pa_features)); ibm_pa_features, ARRAY_SIZE(ibm_pa_features));
} }
#ifdef CONFIG_PPC64 #ifdef CONFIG_PPC_STD_MMU_64
static void __init check_cpu_slb_size(unsigned long node) static void __init check_cpu_slb_size(unsigned long node)
{ {
u32 *slb_size_ptr; u32 *slb_size_ptr;
......
...@@ -417,9 +417,11 @@ void __init setup_system(void) ...@@ -417,9 +417,11 @@ void __init setup_system(void)
if (ppc64_caches.iline_size != 0x80) if (ppc64_caches.iline_size != 0x80)
printk("ppc64_caches.icache_line_size = 0x%x\n", printk("ppc64_caches.icache_line_size = 0x%x\n",
ppc64_caches.iline_size); ppc64_caches.iline_size);
#ifdef CONFIG_PPC_STD_MMU_64
if (htab_address) if (htab_address)
printk("htab_address = 0x%p\n", htab_address); printk("htab_address = 0x%p\n", htab_address);
printk("htab_hash_mask = 0x%lx\n", htab_hash_mask); printk("htab_hash_mask = 0x%lx\n", htab_hash_mask);
#endif /* CONFIG_PPC_STD_MMU_64 */
if (PHYSICAL_START > 0) if (PHYSICAL_START > 0)
printk("physical_start = 0x%lx\n", printk("physical_start = 0x%lx\n",
PHYSICAL_START); PHYSICAL_START);
...@@ -511,8 +513,9 @@ void __init setup_arch(char **cmdline_p) ...@@ -511,8 +513,9 @@ void __init setup_arch(char **cmdline_p)
irqstack_early_init(); irqstack_early_init();
emergency_stack_init(); emergency_stack_init();
#ifdef CONFIG_PPC_STD_MMU_64
stabs_alloc(); stabs_alloc();
#endif
/* set up the bootmem stuff with available memory */ /* set up the bootmem stuff with available memory */
do_init_bootmem(); do_init_bootmem();
sparse_init(); sparse_init();
......
...@@ -11,10 +11,11 @@ obj-y := fault.o mem.o pgtable.o gup.o \ ...@@ -11,10 +11,11 @@ obj-y := fault.o mem.o pgtable.o gup.o \
pgtable_$(CONFIG_WORD_SIZE).o pgtable_$(CONFIG_WORD_SIZE).o
obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \ obj-$(CONFIG_PPC_MMU_NOHASH) += mmu_context_nohash.o tlb_nohash.o \
tlb_nohash_low.o tlb_nohash_low.o
hash-$(CONFIG_PPC_NATIVE) := hash_native_64.o obj-$(CONFIG_PPC64) += mmap_64.o
obj-$(CONFIG_PPC64) += hash_utils_64.o \ hash64-$(CONFIG_PPC_NATIVE) := hash_native_64.o
obj-$(CONFIG_PPC_STD_MMU_64) += hash_utils_64.o \
slb_low.o slb.o stab.o \ slb_low.o slb.o stab.o \
mmap_64.o $(hash-y) mmap_64.o $(hash64-y)
obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o obj-$(CONFIG_PPC_STD_MMU_32) += ppc_mmu_32.o
obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \ obj-$(CONFIG_PPC_STD_MMU) += hash_low_$(CONFIG_WORD_SIZE).o \
tlb_hash$(CONFIG_WORD_SIZE).o \ tlb_hash$(CONFIG_WORD_SIZE).o \
......
...@@ -66,6 +66,7 @@ ...@@ -66,6 +66,7 @@
#include "mmu_decl.h" #include "mmu_decl.h"
#ifdef CONFIG_PPC_STD_MMU_64
#if PGTABLE_RANGE > USER_VSID_RANGE #if PGTABLE_RANGE > USER_VSID_RANGE
#warning Limited user VSID range means pagetable space is wasted #warning Limited user VSID range means pagetable space is wasted
#endif #endif
...@@ -73,6 +74,7 @@ ...@@ -73,6 +74,7 @@
#if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE) #if (TASK_SIZE_USER64 < PGTABLE_RANGE) && (TASK_SIZE_USER64 < USER_VSID_RANGE)
#warning TASK_SIZE is smaller than it needs to be. #warning TASK_SIZE is smaller than it needs to be.
#endif #endif
#endif /* CONFIG_PPC_STD_MMU_64 */
phys_addr_t memstart_addr = ~0; phys_addr_t memstart_addr = ~0;
phys_addr_t kernstart_addr; phys_addr_t kernstart_addr;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment