Commit a15cd063 authored by Linus Torvalds

Merge branch 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc

Pull powerpc fixes from Ben Herrenschmidt:
 "Here's a few powerpc fixes for 3.9, mostly regressions (though not all
  from 3.9 merge window) that we've been hammering into shape over the
  last couple of weeks.  They fix booting on Cell and G5 among other
  things (yes, we've been a bit sloppy with older machines this time
  around)."

* 'merge' of git://git.kernel.org/pub/scm/linux/kernel/git/benh/powerpc:
  powerpc: Rename USER_ESID_BITS* to ESID_BITS*
  powerpc: Update kernel VSID range
  powerpc: Make VSID_BITS* dependency explicit
  powerpc: Make sure that we always include CONFIG_BINFMT_ELF
  powerpc/ptrace: Fix brk.len used uninitialised
  powerpc: Fix -mcmodel=medium breakage in prom_init.c
  powerpc: Remove last traces of POWER4_ONLY
  powerpc: Fix cputable entry for 970MP rev 1.0
  powerpc: Fix STAB initialization
parents 6210d421 af81d787
...@@ -90,6 +90,7 @@ config GENERIC_GPIO ...@@ -90,6 +90,7 @@ config GENERIC_GPIO
config PPC config PPC
bool bool
default y default y
select BINFMT_ELF
select OF select OF
select OF_EARLY_FLATTREE select OF_EARLY_FLATTREE
select HAVE_FTRACE_MCOUNT_RECORD select HAVE_FTRACE_MCOUNT_RECORD
......
...@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size); ...@@ -343,17 +343,16 @@ extern void slb_set_size(u16 size);
/* /*
* VSID allocation (256MB segment) * VSID allocation (256MB segment)
* *
* We first generate a 38-bit "proto-VSID". For kernel addresses this * We first generate a 37-bit "proto-VSID". Proto-VSIDs are generated
* is equal to the ESID | 1 << 37, for user addresses it is: * from mmu context id and effective segment id of the address.
* (context << USER_ESID_BITS) | (esid & ((1U << USER_ESID_BITS) - 1)
* *
* This splits the proto-VSID into the below range * For user processes, the max context id is limited to ((1ul << 19) - 5);
* 0 - (2^(CONTEXT_BITS + USER_ESID_BITS) - 1) : User proto-VSID range * for kernel space, we use the top 4 context ids to map addresses as below
* 2^(CONTEXT_BITS + USER_ESID_BITS) - 2^(VSID_BITS) : Kernel proto-VSID range * NOTE: each context only supports 64TB now.
* * 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
* We also have CONTEXT_BITS + USER_ESID_BITS = VSID_BITS - 1 * 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
* That is, we assign half of the space to user processes and half * 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
* to the kernel. * 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
* *
* The proto-VSIDs are then scrambled into real VSIDs with the * The proto-VSIDs are then scrambled into real VSIDs with the
* multiplicative hash: * multiplicative hash:
...@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size); ...@@ -363,41 +362,49 @@ extern void slb_set_size(u16 size);
* VSID_MULTIPLIER is prime, so in particular it is * VSID_MULTIPLIER is prime, so in particular it is
* co-prime to VSID_MODULUS, making this a 1:1 scrambling function. * co-prime to VSID_MODULUS, making this a 1:1 scrambling function.
* Because the modulus is 2^n-1 we can compute it efficiently without * Because the modulus is 2^n-1 we can compute it efficiently without
* a divide or extra multiply (see below). * a divide or extra multiply (see below). The scramble function gives
* * robust scattering in the hash table (at least based on some initial
* This scheme has several advantages over older methods: * results).
*
* - We have VSIDs allocated for every kernel address
* (i.e. everything above 0xC000000000000000), except the very top
* segment, which simplifies several things.
* *
* - We allow for USER_ESID_BITS significant bits of ESID and * We also consider VSID 0 special. We use VSID 0 for slb entries mapping
* CONTEXT_BITS bits of context for user addresses. * bad address. This enables us to consolidate bad address handling in
* i.e. 64T (46 bits) of address space for up to half a million contexts. * hash_page.
* *
* - The scramble function gives robust scattering in the hash * We also need to avoid the last segment of the last context, because that
* table (at least based on some initial results). The previous * would give a protovsid of 0x1fffffffff. That will result in a VSID 0
* method was more susceptible to pathological cases giving excessive * because of the modulo operation in vsid scramble. But the vmemmap
* hash collisions. * (which is what uses region 0xf) will never be close to 64TB in size
* (it's 56 bytes per page of system memory).
*/ */
#define CONTEXT_BITS 19
#define ESID_BITS 18
#define ESID_BITS_1T 6
/*
* 256MB segment
* The proto-VSID space has 2^(CONTEXT_BITS + ESID_BITS) - 1 segments
* available for user + kernel mapping. The top 4 contexts are used for
* kernel mapping. Each segment contains 2^28 bytes. Each
* context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
* (19 == 37 + 28 - 46).
*/
#define MAX_USER_CONTEXT ((ASM_CONST(1) << CONTEXT_BITS) - 5)
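A quick sanity check on the new layout (a sketch of mine, not part of the patch; SID_SHIFT is the usual 28 for 256MB segments) — the arithmetic in the comment above can be verified at compile time:

#include <assert.h>

#define CONTEXT_BITS      19
#define ESID_BITS         18
#define SID_SHIFT         28                          /* 256MB segments */
#define MAX_USER_CONTEXT  ((1ULL << CONTEXT_BITS) - 5)

/* Each context covers 2^(ESID_BITS + SID_SHIFT) = 2^46 bytes = 64TB. */
static_assert((1ULL << (ESID_BITS + SID_SHIFT)) == (1ULL << 46), "64TB per context");

/* User contexts run 0..0x7fffb; the kernel takes 0x7fffc..0x7ffff. */
static_assert(MAX_USER_CONTEXT == 0x7fffb, "top 4 context ids go to the kernel");

/* The last ESID of the last kernel context would give proto-VSID 2^37 - 1,
 * i.e. exactly VSID_MODULUS_256M, which scrambles to VSID 0 -- which is why
 * that segment must never be used. */
static_assert(((0x7ffffULL << ESID_BITS) | ((1ULL << ESID_BITS) - 1)) ==
              (1ULL << (CONTEXT_BITS + ESID_BITS)) - 1, "proto-VSID 0x1fffffffff");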
/* /*
* This should be computed such that protovsid * vsid_multiplier * This should be computed such that protovsid * vsid_multiplier
* doesn't overflow 64 bits. It should also be co-prime to vsid_modulus * doesn't overflow 64 bits. It should also be co-prime to vsid_modulus
*/ */
#define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */ #define VSID_MULTIPLIER_256M ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_256M 38 #define VSID_BITS_256M (CONTEXT_BITS + ESID_BITS)
#define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1) #define VSID_MODULUS_256M ((1UL<<VSID_BITS_256M)-1)
#define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */ #define VSID_MULTIPLIER_1T ASM_CONST(12538073) /* 24-bit prime */
#define VSID_BITS_1T 26 #define VSID_BITS_1T (CONTEXT_BITS + ESID_BITS_1T)
#define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1) #define VSID_MODULUS_1T ((1UL<<VSID_BITS_1T)-1)
#define CONTEXT_BITS 19
#define USER_ESID_BITS 18
#define USER_ESID_BITS_1T 6
#define USER_VSID_RANGE (1UL << (USER_ESID_BITS + SID_SHIFT)) #define USER_VSID_RANGE (1UL << (ESID_BITS + SID_SHIFT))
/* /*
* This macro generates asm code to compute the VSID scramble * This macro generates asm code to compute the VSID scramble
...@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size); ...@@ -421,7 +428,8 @@ extern void slb_set_size(u16 size);
srdi rx,rt,VSID_BITS_##size; \ srdi rx,rt,VSID_BITS_##size; \
clrldi rt,rt,(64-VSID_BITS_##size); \ clrldi rt,rt,(64-VSID_BITS_##size); \
add rt,rt,rx; /* add high and low bits */ \ add rt,rt,rx; /* add high and low bits */ \
/* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \ /* NOTE: explanation based on VSID_BITS_##size = 36 \
* Now, r3 == VSID (mod 2^36-1), and lies between 0 and \
* 2^36-1+2^28-1. That in particular means that if r3 >= \ * 2^36-1+2^28-1. That in particular means that if r3 >= \
* 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \ * 2^36-1, then r3+1 has the 2^36 bit set. So, if r3+1 has \
* the bit clear, r3 already has the answer we want, if it \ * the bit clear, r3 already has the answer we want, if it \
...@@ -513,34 +521,6 @@ typedef struct { ...@@ -513,34 +521,6 @@ typedef struct {
}) })
#endif /* 1 */ #endif /* 1 */
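The ASM_VSID_SCRAMBLE macro above reduces modulo 2^VSID_BITS - 1 without a divide. A minimal C sketch of the same folding trick (mine, not from the patch), using the new 256M values VSID_BITS = 37 and the 24-bit prime multiplier:

#include <stdint.h>

#define VSID_BITS        37                        /* CONTEXT_BITS + ESID_BITS */
#define VSID_MULTIPLIER  12538073ULL               /* 24-bit prime */
#define VSID_MODULUS     ((1ULL << VSID_BITS) - 1)

static uint64_t vsid_scramble_256M(uint64_t protovsid)
{
	uint64_t x = protovsid * VSID_MULTIPLIER;  /* 37 + 24 bits, no overflow */

	/* Fold the bits above VSID_BITS into the low bits; 2^VSID_BITS is
	 * congruent to 1 modulo 2^VSID_BITS - 1, so x keeps its residue. */
	x = (x >> VSID_BITS) + (x & VSID_MODULUS);

	/* x is now below 2 * modulus.  If x >= modulus, then x + 1 has bit
	 * VSID_BITS set; adding that bit and masking subtracts the modulus,
	 * otherwise x is already the answer. */
	x += (x + 1) >> VSID_BITS;
	return x & VSID_MODULUS;
}

The final mask mirrors the callers in slb_low.S, which ignore bits above VSID_BITS after the scramble.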
/*
* This is only valid for addresses >= PAGE_OFFSET
* The proto-VSID space is divided into two class
* User: 0 to 2^(CONTEXT_BITS + USER_ESID_BITS) -1
* kernel: 2^(CONTEXT_BITS + USER_ESID_BITS) to 2^(VSID_BITS) - 1
*
* With KERNEL_START at 0xc000000000000000, the proto vsid for
* the kernel ends up with 0xc00000000 (36 bits). With 64TB
* support we need to have kernel proto-VSID in the
* [2^37 to 2^38 - 1] range due to the increased USER_ESID_BITS.
*/
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
unsigned long proto_vsid;
/*
* We need to make sure proto_vsid for the kernel is
* >= 2^(CONTEXT_BITS + USER_ESID_BITS[_1T])
*/
if (ssize == MMU_SEGSIZE_256M) {
proto_vsid = ea >> SID_SHIFT;
proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS));
return vsid_scramble(proto_vsid, 256M);
}
proto_vsid = ea >> SID_SHIFT_1T;
proto_vsid |= (1UL << (CONTEXT_BITS + USER_ESID_BITS_1T));
return vsid_scramble(proto_vsid, 1T);
}
/* Returns the segment size indicator for a user address */ /* Returns the segment size indicator for a user address */
static inline int user_segment_size(unsigned long addr) static inline int user_segment_size(unsigned long addr)
{ {
...@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr) ...@@ -550,17 +530,41 @@ static inline int user_segment_size(unsigned long addr)
return MMU_SEGSIZE_256M; return MMU_SEGSIZE_256M;
} }
/* This is only valid for user addresses (which are below 2^44) */
static inline unsigned long get_vsid(unsigned long context, unsigned long ea, static inline unsigned long get_vsid(unsigned long context, unsigned long ea,
int ssize) int ssize)
{ {
/*
* Bad address. We return VSID 0 for that
*/
if ((ea & ~REGION_MASK) >= PGTABLE_RANGE)
return 0;
if (ssize == MMU_SEGSIZE_256M) if (ssize == MMU_SEGSIZE_256M)
return vsid_scramble((context << USER_ESID_BITS) return vsid_scramble((context << ESID_BITS)
| (ea >> SID_SHIFT), 256M); | (ea >> SID_SHIFT), 256M);
return vsid_scramble((context << USER_ESID_BITS_1T) return vsid_scramble((context << ESID_BITS_1T)
| (ea >> SID_SHIFT_1T), 1T); | (ea >> SID_SHIFT_1T), 1T);
} }
/*
* This is only valid for addresses >= PAGE_OFFSET
*
* For kernel space, we use the top 4 context ids to map address as below
* 0x7fffc - [ 0xc000000000000000 - 0xc0003fffffffffff ]
* 0x7fffd - [ 0xd000000000000000 - 0xd0003fffffffffff ]
* 0x7fffe - [ 0xe000000000000000 - 0xe0003fffffffffff ]
* 0x7ffff - [ 0xf000000000000000 - 0xf0003fffffffffff ]
*/
static inline unsigned long get_kernel_vsid(unsigned long ea, int ssize)
{
unsigned long context;
/*
* the kernel takes the top 4 context ids from the available range
*/
context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1;
return get_vsid(context, ea, ssize);
}
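As a worked example of the context arithmetic (mine, not part of the patch), take an address in the 0xd kernel region:

	ea >> 60  = 0xd
	context   = MAX_USER_CONTEXT + (0xd - 0xc) + 1
	          = 0x7fffb + 2 = 0x7fffd

which matches the 0xd row of the table above; get_vsid() then scrambles (context << ESID_BITS) | (ea >> SID_SHIFT) into the real VSID.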
#endif /* __ASSEMBLY__ */ #endif /* __ASSEMBLY__ */
#endif /* _ASM_POWERPC_MMU_HASH64_H_ */ #endif /* _ASM_POWERPC_MMU_HASH64_H_ */
...@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = { ...@@ -275,7 +275,7 @@ static struct cpu_spec __initdata cpu_specs[] = {
.cpu_features = CPU_FTRS_PPC970, .cpu_features = CPU_FTRS_PPC970,
.cpu_user_features = COMMON_USER_POWER4 | .cpu_user_features = COMMON_USER_POWER4 |
PPC_FEATURE_HAS_ALTIVEC_COMP, PPC_FEATURE_HAS_ALTIVEC_COMP,
.mmu_features = MMU_FTR_HPTE_TABLE, .mmu_features = MMU_FTRS_PPC970,
.icache_bsize = 128, .icache_bsize = 128,
.dcache_bsize = 128, .dcache_bsize = 128,
.num_pmcs = 8, .num_pmcs = 8,
......
...@@ -1452,20 +1452,36 @@ do_ste_alloc: ...@@ -1452,20 +1452,36 @@ do_ste_alloc:
_GLOBAL(do_stab_bolted) _GLOBAL(do_stab_bolted)
stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */ stw r9,PACA_EXSLB+EX_CCR(r13) /* save CR in exc. frame */
std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */ std r11,PACA_EXSLB+EX_SRR0(r13) /* save SRR0 in exc. frame */
mfspr r11,SPRN_DAR /* ea */
/*
* check for bad kernel/user address
* (ea & ~REGION_MASK) >= PGTABLE_RANGE
*/
rldicr. r9,r11,4,(63 - 46 - 4)
li r9,0 /* VSID = 0 for bad address */
bne- 0f
/*
* Calculate VSID:
* This is the kernel vsid; we take the context from the top of
* the range: context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
* Here we know that (ea >> 60) == 0xc
*/
lis r9,(MAX_USER_CONTEXT + 1)@ha
addi r9,r9,(MAX_USER_CONTEXT + 1)@l
srdi r10,r11,SID_SHIFT
rldimi r10,r9,ESID_BITS,0 /* proto vsid */
ASM_VSID_SCRAMBLE(r10, r9, 256M)
rldic r9,r10,12,16 /* r9 = vsid << 12 */
0:
/* Hash to the primary group */ /* Hash to the primary group */
ld r10,PACASTABVIRT(r13) ld r10,PACASTABVIRT(r13)
mfspr r11,SPRN_DAR srdi r11,r11,SID_SHIFT
srdi r11,r11,28
rldimi r10,r11,7,52 /* r10 = first ste of the group */ rldimi r10,r11,7,52 /* r10 = first ste of the group */
/* Calculate VSID */
/* This is a kernel address, so protovsid = ESID | 1 << 37 */
li r9,0x1
rldimi r11,r9,(CONTEXT_BITS + USER_ESID_BITS),0
ASM_VSID_SCRAMBLE(r11, r9, 256M)
rldic r9,r11,12,16 /* r9 = vsid << 12 */
/* Search the primary group for a free entry */ /* Search the primary group for a free entry */
1: ld r11,0(r10) /* Test valid bit of the current ste */ 1: ld r11,0(r10) /* Test valid bit of the current ste */
andi. r11,r11,0x80 andi. r11,r11,0x80
......
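The new bad-address test used here (and again in slb_allocate_realmode further down) is compact but a little opaque. Under my reading of the rotate-and-mask, rldicr. r9,r11,4,(63 - 46 - 4) rotates the address left by 4 and keeps the top 14 bits, i.e. it isolates ea bits 46..59; a C rendering (a sketch, not part of the patch):

#include <stdint.h>

/* Nonzero iff (ea & ~REGION_MASK) >= PGTABLE_RANGE (2^46 here), i.e. the
 * offset within the region is beyond the 64TB the page tables can map.
 * 0x0fffc00000000000 is bits 46..59 of the effective address. */
static int ea_is_bad(uint64_t ea)
{
	return (ea & 0x0fffc00000000000ULL) != 0;
}

When the test fires, the stab path loads VSID 0 and the SLB path branches to the invalid-EA label, so both consolidate bad addresses onto VSID 0 as described in the mmu-hash64.h comment.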
...@@ -2832,11 +2832,13 @@ static void unreloc_toc(void) ...@@ -2832,11 +2832,13 @@ static void unreloc_toc(void)
{ {
} }
#else #else
static void __reloc_toc(void *tocstart, unsigned long offset, static void __reloc_toc(unsigned long offset, unsigned long nr_entries)
unsigned long nr_entries)
{ {
unsigned long i; unsigned long i;
unsigned long *toc_entry = (unsigned long *)tocstart; unsigned long *toc_entry;
/* Get the start of the TOC by using r2 directly. */
asm volatile("addi %0,2,-0x8000" : "=b" (toc_entry));
for (i = 0; i < nr_entries; i++) { for (i = 0; i < nr_entries; i++) {
*toc_entry = *toc_entry + offset; *toc_entry = *toc_entry + offset;
...@@ -2850,8 +2852,7 @@ static void reloc_toc(void) ...@@ -2850,8 +2852,7 @@ static void reloc_toc(void)
unsigned long nr_entries = unsigned long nr_entries =
(__prom_init_toc_end - __prom_init_toc_start) / sizeof(long); (__prom_init_toc_end - __prom_init_toc_start) / sizeof(long);
/* Need to add offset to get at __prom_init_toc_start */ __reloc_toc(offset, nr_entries);
__reloc_toc(__prom_init_toc_start + offset, offset, nr_entries);
mb(); mb();
} }
...@@ -2864,8 +2865,7 @@ static void unreloc_toc(void) ...@@ -2864,8 +2865,7 @@ static void unreloc_toc(void)
mb(); mb();
/* __prom_init_toc_start has been relocated, no need to add offset */ __reloc_toc(-offset, nr_entries);
__reloc_toc(__prom_init_toc_start, -offset, nr_entries);
} }
#endif #endif
#endif #endif
......
...@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child, ...@@ -1428,6 +1428,7 @@ static long ppc_set_hwdebug(struct task_struct *child,
brk.address = bp_info->addr & ~7UL; brk.address = bp_info->addr & ~7UL;
brk.type = HW_BRK_TYPE_TRANSLATE; brk.type = HW_BRK_TYPE_TRANSLATE;
brk.len = 8;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ) if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_READ)
brk.type |= HW_BRK_TYPE_READ; brk.type |= HW_BRK_TYPE_READ;
if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE) if (bp_info->trigger_type & PPC_BREAKPOINT_TRIGGER_WRITE)
......
...@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu) ...@@ -326,8 +326,8 @@ int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
vcpu3s->context_id[0] = err; vcpu3s->context_id[0] = err;
vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1) vcpu3s->proto_vsid_max = ((vcpu3s->context_id[0] + 1)
<< USER_ESID_BITS) - 1; << ESID_BITS) - 1;
vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << USER_ESID_BITS; vcpu3s->proto_vsid_first = vcpu3s->context_id[0] << ESID_BITS;
vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first; vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;
kvmppc_mmu_hpte_init(vcpu); kvmppc_mmu_hpte_init(vcpu);
......
...@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, ...@@ -195,6 +195,11 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
unsigned long vpn = hpt_vpn(vaddr, vsid, ssize); unsigned long vpn = hpt_vpn(vaddr, vsid, ssize);
unsigned long tprot = prot; unsigned long tprot = prot;
/*
* If we hit a bad address return error.
*/
if (!vsid)
return -1;
/* Make kernel text executable */ /* Make kernel text executable */
if (overlaps_kernel_text(vaddr, vaddr + step)) if (overlaps_kernel_text(vaddr, vaddr + step))
tprot &= ~HPTE_R_N; tprot &= ~HPTE_R_N;
...@@ -759,6 +764,8 @@ void __init early_init_mmu(void) ...@@ -759,6 +764,8 @@ void __init early_init_mmu(void)
/* Initialize stab / SLB management */ /* Initialize stab / SLB management */
if (mmu_has_feature(MMU_FTR_SLB)) if (mmu_has_feature(MMU_FTR_SLB))
slb_initialize(); slb_initialize();
else
stab_initialize(get_paca()->stab_real);
} }
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ...@@ -922,11 +929,6 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n", DBG_LOW("hash_page(ea=%016lx, access=%lx, trap=%lx\n",
ea, access, trap); ea, access, trap);
if ((ea & ~REGION_MASK) >= PGTABLE_RANGE) {
DBG_LOW(" out of pgtable range !\n");
return 1;
}
/* Get region & vsid */ /* Get region & vsid */
switch (REGION_ID(ea)) { switch (REGION_ID(ea)) {
case USER_REGION_ID: case USER_REGION_ID:
...@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap) ...@@ -957,6 +959,11 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
} }
DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid); DBG_LOW(" mm=%p, mm->pgdir=%p, vsid=%016lx\n", mm, mm->pgd, vsid);
/* Bad address. */
if (!vsid) {
DBG_LOW("Bad address!\n");
return 1;
}
/* Get pgdir */ /* Get pgdir */
pgdir = mm->pgd; pgdir = mm->pgd;
if (pgdir == NULL) if (pgdir == NULL)
...@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea, ...@@ -1126,6 +1133,8 @@ void hash_preload(struct mm_struct *mm, unsigned long ea,
/* Get VSID */ /* Get VSID */
ssize = user_segment_size(ea); ssize = user_segment_size(ea);
vsid = get_vsid(mm->context.id, ea, ssize); vsid = get_vsid(mm->context.id, ea, ssize);
if (!vsid)
return;
/* Hash doesn't like irqs */ /* Hash doesn't like irqs */
local_irq_save(flags); local_irq_save(flags);
...@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi) ...@@ -1233,6 +1242,9 @@ static void kernel_map_linear_page(unsigned long vaddr, unsigned long lmi)
hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize); hash = hpt_hash(vpn, PAGE_SHIFT, mmu_kernel_ssize);
hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP); hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
/* Don't create HPTE entries for bad address */
if (!vsid)
return;
ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr), ret = ppc_md.hpte_insert(hpteg, vpn, __pa(vaddr),
mode, HPTE_V_BOLTED, mode, HPTE_V_BOLTED,
mmu_linear_psize, mmu_kernel_ssize); mmu_linear_psize, mmu_kernel_ssize);
......
...@@ -29,15 +29,6 @@ ...@@ -29,15 +29,6 @@
static DEFINE_SPINLOCK(mmu_context_lock); static DEFINE_SPINLOCK(mmu_context_lock);
static DEFINE_IDA(mmu_context_ida); static DEFINE_IDA(mmu_context_ida);
/*
* 256MB segment
* The proto-VSID space has 2^(CONTEX_BITS + USER_ESID_BITS) - 1 segments
* available for user mappings. Each segment contains 2^28 bytes. Each
* context maps 2^46 bytes (64TB) so we can support 2^19-1 contexts
* (19 == 37 + 28 - 46).
*/
#define MAX_CONTEXT ((1UL << CONTEXT_BITS) - 1)
int __init_new_context(void) int __init_new_context(void)
{ {
int index; int index;
...@@ -56,7 +47,7 @@ int __init_new_context(void) ...@@ -56,7 +47,7 @@ int __init_new_context(void)
else if (err) else if (err)
return err; return err;
if (index > MAX_CONTEXT) { if (index > MAX_USER_CONTEXT) {
spin_lock(&mmu_context_lock); spin_lock(&mmu_context_lock);
ida_remove(&mmu_context_ida, index); ida_remove(&mmu_context_ida, index);
spin_unlock(&mmu_context_lock); spin_unlock(&mmu_context_lock);
......
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
#endif #endif
#ifdef CONFIG_PPC_STD_MMU_64 #ifdef CONFIG_PPC_STD_MMU_64
#if TASK_SIZE_USER64 > (1UL << (USER_ESID_BITS + SID_SHIFT)) #if TASK_SIZE_USER64 > (1UL << (ESID_BITS + SID_SHIFT))
#error TASK_SIZE_USER64 exceeds user VSID range #error TASK_SIZE_USER64 exceeds user VSID range
#endif #endif
#endif #endif
......
...@@ -31,10 +31,15 @@ ...@@ -31,10 +31,15 @@
* No other registers are examined or changed. * No other registers are examined or changed.
*/ */
_GLOBAL(slb_allocate_realmode) _GLOBAL(slb_allocate_realmode)
/* r3 = faulting address */ /*
* check for bad kernel/user address
* (ea & ~REGION_MASK) >= PGTABLE_RANGE
*/
rldicr. r9,r3,4,(63 - 46 - 4)
bne- 8f
srdi r9,r3,60 /* get region */ srdi r9,r3,60 /* get region */
srdi r10,r3,28 /* get esid */ srdi r10,r3,SID_SHIFT /* get esid */
cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */ cmpldi cr7,r9,0xc /* cmp PAGE_OFFSET for later use */
/* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */ /* r3 = address, r10 = esid, cr7 = <> PAGE_OFFSET */
...@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode) ...@@ -56,12 +61,14 @@ _GLOBAL(slb_allocate_realmode)
*/ */
_GLOBAL(slb_miss_kernel_load_linear) _GLOBAL(slb_miss_kernel_load_linear)
li r11,0 li r11,0
li r9,0x1
/* /*
* for 1T we shift 12 bits more. slb_finish_load_1T will do * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
* the necessary adjustment * r9 = region id.
*/ */
rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b slb_finish_load b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
...@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap) ...@@ -91,24 +98,19 @@ _GLOBAL(slb_miss_kernel_load_vmemmap)
_GLOBAL(slb_miss_kernel_load_io) _GLOBAL(slb_miss_kernel_load_io)
li r11,0 li r11,0
6: 6:
li r9,0x1
/* /*
* for 1T we shift 12 bits more. slb_finish_load_1T will do * context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
* the necessary adjustment * r9 = region id.
*/ */
rldimi r10,r9,(CONTEXT_BITS + USER_ESID_BITS),0 addis r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@ha
addi r9,r9,(MAX_USER_CONTEXT - 0xc + 1)@l
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
b slb_finish_load b slb_finish_load
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
b slb_finish_load_1T b slb_finish_load_1T
0: /* user address: proto-VSID = context << 15 | ESID. First check 0:
* if the address is within the boundaries of the user region
*/
srdi. r9,r10,USER_ESID_BITS
bne- 8f /* invalid ea bits set */
/* when using slices, we extract the psize off the slice bitmaps /* when using slices, we extract the psize off the slice bitmaps
* and then we need to get the sllp encoding off the mmu_psize_defs * and then we need to get the sllp encoding off the mmu_psize_defs
* array. * array.
...@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT) ...@@ -164,15 +166,13 @@ END_MMU_FTR_SECTION_IFCLR(MMU_FTR_1T_SEGMENT)
ld r9,PACACONTEXTID(r13) ld r9,PACACONTEXTID(r13)
BEGIN_FTR_SECTION BEGIN_FTR_SECTION
cmpldi r10,0x1000 cmpldi r10,0x1000
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
rldimi r10,r9,USER_ESID_BITS,0
BEGIN_FTR_SECTION
bge slb_finish_load_1T bge slb_finish_load_1T
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT) END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
b slb_finish_load b slb_finish_load
8: /* invalid EA */ 8: /* invalid EA */
li r10,0 /* BAD_VSID */ li r10,0 /* BAD_VSID */
li r9,0 /* BAD_VSID */
li r11,SLB_VSID_USER /* flags don't much matter */ li r11,SLB_VSID_USER /* flags don't much matter */
b slb_finish_load b slb_finish_load
...@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user) ...@@ -221,8 +221,6 @@ _GLOBAL(slb_allocate_user)
/* get context to calculate proto-VSID */ /* get context to calculate proto-VSID */
ld r9,PACACONTEXTID(r13) ld r9,PACACONTEXTID(r13)
rldimi r10,r9,USER_ESID_BITS,0
/* fall through slb_finish_load */ /* fall through slb_finish_load */
#endif /* __DISABLED__ */ #endif /* __DISABLED__ */
...@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user) ...@@ -231,9 +229,10 @@ _GLOBAL(slb_allocate_user)
/* /*
* Finish loading of an SLB entry and return * Finish loading of an SLB entry and return
* *
* r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET * r3 = EA, r9 = context, r10 = ESID, r11 = flags, clobbers r9, cr7 = <> PAGE_OFFSET
*/ */
slb_finish_load: slb_finish_load:
rldimi r10,r9,ESID_BITS,0
ASM_VSID_SCRAMBLE(r10,r9,256M) ASM_VSID_SCRAMBLE(r10,r9,256M)
/* /*
* bits above VSID_BITS_256M need to be ignored from r10 * bits above VSID_BITS_256M need to be ignored from r10
...@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size) ...@@ -298,10 +297,11 @@ _GLOBAL(slb_compare_rr_to_size)
/* /*
* Finish loading of a 1T SLB entry (for the kernel linear mapping) and return. * Finish loading of a 1T SLB entry (for the kernel linear mapping) and return.
* *
* r3 = EA, r10 = proto-VSID, r11 = flags, clobbers r9 * r3 = EA, r9 = context, r10 = ESID(256MB), r11 = flags, clobbers r9
*/ */
slb_finish_load_1T: slb_finish_load_1T:
srdi r10,r10,40-28 /* get 1T ESID */ srdi r10,r10,(SID_SHIFT_1T - SID_SHIFT) /* get 1T ESID */
rldimi r10,r9,ESID_BITS_1T,0
ASM_VSID_SCRAMBLE(r10,r9,1T) ASM_VSID_SCRAMBLE(r10,r9,1T)
/* /*
* bits above VSID_BITS_1T need to be ignored from r10 * bits above VSID_BITS_1T need to be ignored from r10
......
...@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr, ...@@ -82,11 +82,11 @@ void hpte_need_flush(struct mm_struct *mm, unsigned long addr,
if (!is_kernel_addr(addr)) { if (!is_kernel_addr(addr)) {
ssize = user_segment_size(addr); ssize = user_segment_size(addr);
vsid = get_vsid(mm->context.id, addr, ssize); vsid = get_vsid(mm->context.id, addr, ssize);
WARN_ON(vsid == 0);
} else { } else {
vsid = get_kernel_vsid(addr, mmu_kernel_ssize); vsid = get_kernel_vsid(addr, mmu_kernel_ssize);
ssize = mmu_kernel_ssize; ssize = mmu_kernel_ssize;
} }
WARN_ON(vsid == 0);
vpn = hpt_vpn(addr, vsid, ssize); vpn = hpt_vpn(addr, vsid, ssize);
rpte = __real_pte(__pte(pte), ptep); rpte = __real_pte(__pte(pte), ptep);
......
...@@ -124,9 +124,8 @@ config 6xx ...@@ -124,9 +124,8 @@ config 6xx
select PPC_HAVE_PMU_SUPPORT select PPC_HAVE_PMU_SUPPORT
config POWER3 config POWER3
bool
depends on PPC64 && PPC_BOOK3S depends on PPC64 && PPC_BOOK3S
default y if !POWER4_ONLY def_bool y
config POWER4 config POWER4
depends on PPC64 && PPC_BOOK3S depends on PPC64 && PPC_BOOK3S
...@@ -145,8 +144,7 @@ config TUNE_CELL ...@@ -145,8 +144,7 @@ config TUNE_CELL
but somewhat slower on other machines. This option only changes but somewhat slower on other machines. This option only changes
the scheduling of instructions, not the selection of instructions the scheduling of instructions, not the selection of instructions
itself, so the resulting kernel will keep running on all other itself, so the resulting kernel will keep running on all other
machines. When building a kernel that is supposed to run only machines.
on Cell, you should also select the POWER4_ONLY option.
# this is temp to handle compat with arch=ppc # this is temp to handle compat with arch=ppc
config 8xx config 8xx
......