Commit aa8359a9, authored Jun 08, 2003 by Anton Blanchard
ppc64: cputable support from Will Schmidt
parent f9787d64
Showing 16 changed files with 306 additions and 33 deletions:
arch/ppc64/kernel/Makefile          +1    -1
arch/ppc64/kernel/align.c           +6    -6
arch/ppc64/kernel/asm-offsets.c     +8    -0
arch/ppc64/kernel/chrp_setup.c      +29   -0
arch/ppc64/kernel/cputable.c        +127  -0  (new file)
arch/ppc64/kernel/head.S            +18   -0
arch/ppc64/kernel/htab.c            +5    -2
arch/ppc64/kernel/iSeries_setup.c   +2    -1
arch/ppc64/kernel/misc.S            +90   -0
arch/ppc64/kernel/pSeries_htab.c    +4    -3
arch/ppc64/kernel/process.c         +2    -1
arch/ppc64/kernel/smp.c             +2    -1
arch/ppc64/kernel/stab.c            +6    -5
arch/ppc64/mm/init.c                +2    -1
arch/ppc64/vmlinux.lds.S            +4    -0
include/asm-ppc64/processor.h       +0    -12
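A note for orientation before the diffs: everything below revolves around a new struct cpu_spec table. The commit does not touch include/asm-ppc64/cputable.h itself, so the definition is not shown; the sketch below is inferred from the initializers in cputable.c and the offsets exported by asm-offsets.c, and any field name not referenced there (cpu_name, cpu_user_features, icache_bsize, dcache_bsize) is an assumption.

        /* Hedged sketch of struct cpu_spec; the real definition lives in
         * include/asm-ppc64/cputable.h, which is not part of this commit. */
        struct cpu_spec {
                unsigned int    pvr_mask;               /* CPU_SPEC_PVR_MASK */
                unsigned int    pvr_value;              /* CPU_SPEC_PVR_VALUE */
                char            *cpu_name;              /* e.g. "Power3 (630)" */
                unsigned long   cpu_features;           /* CPU_FTR_* bits; CPU_SPEC_FEATURES */
                unsigned int    cpu_user_features;      /* COMMON_USER_PPC64 */
                unsigned int    icache_bsize;           /* 128 in every entry below */
                unsigned int    dcache_bsize;           /* 128 in every entry below */
                void            (*cpu_setup)(unsigned long offset,
                                             struct cpu_spec *spec); /* CPU_SPEC_SETUP */
                unsigned long   firmware_features;      /* filled in by chrp_init() */
        };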
arch/ppc64/kernel/Makefile

@@ -9,7 +9,7 @@ obj-y := setup.o entry.o traps.o irq.o idle.o \
 			align.o semaphore.o bitops.o stab.o htab.o pacaData.o \
 			udbg.o binfmt_elf32.o sys_ppc32.o ioctl32.o \
 			ptrace32.o signal32.o pmc.o rtc.o init_task.o \
-			lmb.o pci.o pci_dn.o pci_dma.o
+			lmb.o pci.o pci_dn.o pci_dma.o cputable.o
 
 obj-$(CONFIG_PPC_ISERIES) += iSeries_pci.o iSeries_pci_reset.o \
 			iSeries_IoMmTable.o iSeries_irq.o \
arch/ppc64/kernel/align.c

@@ -20,6 +20,7 @@
 #include <asm/uaccess.h>
 #include <asm/system.h>
 #include <asm/cache.h>
+#include <asm/cputable.h>
 
 void disable_kernel_fp(void); /* asm function from head.S */

@@ -238,12 +239,11 @@ fix_alignment(struct pt_regs *regs)
 	dsisr = regs->dsisr;
 
 	/* Power4 doesn't set DSISR for an alignment interrupt */
-	if (!cpu_alignexc_sets_dsisr()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_NODSISRALIGN) {
 		unsigned int real_instr;
 		if (__get_user(real_instr, (unsigned int *)regs->nip))
 			return 0;
-		dsisr = make_dsisr(real_instr);
+		dsisr = make_dsisr(*((unsigned *)regs->nip));
 	}
 
 	/* extract the operation and registers from the dsisr */
arch/ppc64/kernel/asm-offsets.c

@@ -34,6 +34,7 @@
 #include <asm/iSeries/HvLpEvent.h>
 #include <asm/prom.h>
 #include <asm/rtas.h>
+#include <asm/cputable.h>
 
 #define DEFINE(sym, val) \
 	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

@@ -159,5 +160,12 @@ int main(void)
 	DEFINE(CLONE_VM, CLONE_VM);
 	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
 
+	/* About the CPU features table */
+	DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
+	DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
+	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
+	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
+	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));
+
 	return 0;
 }
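How these DEFINE()s reach the assembler: the compiler emits each constant into the generated assembly for asm-offsets.c, and the build scrapes the "->SYM value" markers into asm/offsets.h, which head.S and misc.S include. A sketch of the scraped fragment for the new constants; every numeric value here is hypothetical, since the real ones depend on the actual cpu_spec layout:

        /* hypothetical generated asm/offsets.h fragment */
        #define CPU_SPEC_ENTRY_SIZE     40      /* sizeof(struct cpu_spec) */
        #define CPU_SPEC_PVR_MASK       0       /* offsetof(..., pvr_mask) */
        #define CPU_SPEC_PVR_VALUE      4       /* offsetof(..., pvr_value) */
        #define CPU_SPEC_FEATURES       16      /* offsetof(..., cpu_features) */
        #define CPU_SPEC_SETUP          32      /* offsetof(..., cpu_setup) */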
arch/ppc64/kernel/chrp_setup.c

@@ -62,6 +62,7 @@
 #include "open_pic.h"
 #include <asm/xics.h>
 #include <asm/ppcdebug.h>
+#include <asm/cputable.h>
 
 extern volatile unsigned char *chrp_int_ack_special;

@@ -253,6 +254,34 @@ chrp_init(unsigned long r3, unsigned long r4, unsigned long r5,
 	ppc_md.progress = chrp_progress;
 
+	/* build up the firmware_features bitmask field
+	 * using contents of device-tree/ibm,hypertas-functions.
+	 * Ultimately this functionality may be moved into prom.c prom_init().
+	 */
+	struct device_node *dn;
+	char *hypertas;
+	unsigned int len;
+
+	dn = find_path_device("/rtas");
+	cur_cpu_spec->firmware_features = 0;
+	hypertas = get_property(dn, "ibm,hypertas-functions", &len);
+	if (hypertas) {
+		while (len > 0) {
+			int i;
+			/* check value against table of strings */
+			for (i = 0; i < FIRMWARE_MAX_FEATURES; i++) {
+				if ((firmware_features_table[i].name) &&
+				    (strcmp(firmware_features_table[i].name,
+					    hypertas)) == 0) {
+					/* we have a match */
+					cur_cpu_spec->firmware_features |=
+						(1UL << firmware_features_table[i].val);
+					break;
+				}
+			}
+			int hypertas_len = strlen(hypertas);
+			len -= hypertas_len + 1;
+			hypertas += hypertas_len + 1;
+		}
+	}
+
+	udbg_printf("firmware_features bitmask: 0x%x\n",
+		    cur_cpu_spec->firmware_features);
 }
 
 void
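The ibm,hypertas-functions property is a packed list of NUL-terminated strings, which is why the loop above steps by strlen+1 each iteration. A minimal standalone sketch of the same walk, with match_feature standing in for the firmware_features_table lookup (both function names here are invented for illustration):

        #include <string.h>

        /* Walk a buffer holding consecutive NUL-terminated strings totalling
         * len bytes, invoking a callback per string - the same stepping the
         * chrp_init() loop above performs on the device-tree property. */
        static void walk_string_list(const char *buf, unsigned int len,
                                     void (*match_feature)(const char *name))
        {
                while (len > 0) {
                        unsigned int n = strlen(buf) + 1; /* string + its NUL */
                        match_feature(buf);
                        buf += n;
                        len -= n;
                }
        }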
arch/ppc64/kernel/cputable.c (new file, mode 100644)

/*
 *  arch/ppc64/kernel/cputable.c
 *
 *  Copyright (C) 2001 Ben. Herrenschmidt (benh@kernel.crashing.org)
 *
 *  Modifications for ppc64:
 *      Copyright (C) 2003 Dave Engebretsen <engebret@us.ibm.com>
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/config.h>
#include <linux/string.h>
#include <linux/sched.h>
#include <linux/threads.h>
#include <linux/init.h>
#include <asm/cputable.h>

struct cpu_spec *cur_cpu_spec = NULL;

extern void __setup_cpu_power3(unsigned long offset, struct cpu_spec *spec);
extern void __setup_cpu_power4(unsigned long offset, struct cpu_spec *spec);

/* We only set the altivec features if the kernel was compiled with altivec
 * support
 */
#ifdef CONFIG_ALTIVEC
#define CPU_FTR_ALTIVEC_COMP	CPU_FTR_ALTIVEC
#else
#define CPU_FTR_ALTIVEC_COMP	0
#endif

struct cpu_spec cpu_specs[] = {
	{ /* Power3 */
		0xffff0000, 0x00400000, "Power3 (630)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Power3+ */
		0xffff0000, 0x00410000, "Power3 (630+)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Northstar */
		0xffff0000, 0x00330000, "Northstar",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Pulsar */
		0xffff0000, 0x00340000, "Pulsar",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* I-star */
		0xffff0000, 0x00360000, "I-star",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* S-star */
		0xffff0000, 0x00370000, "S-star",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power3,
		COMMON_PPC64_FW
	},
	{ /* Power4 */
		0xffff0000, 0x00350000, "Power4",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	},
	{ /* Power4+ */
		0xffff0000, 0x00380000, "Power4+",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	},
	{ /* default match */
		0x00000000, 0x00000000, "(Power4-Compatible)",
		CPU_FTR_SPLIT_ID_CACHE | CPU_FTR_USE_TB | CPU_FTR_HPTE_TABLE |
			CPU_FTR_PPCAS_ARCH_V2,
		COMMON_USER_PPC64,
		128, 128,
		__setup_cpu_power4,
		COMMON_PPC64_FW
	}
};

firmware_feature_t firmware_features_table[FIRMWARE_MAX_FEATURES] = {
	{ FW_FEATURE_PFT,	"hcall-pft" },
	{ FW_FEATURE_TCE,	"hcall-tce" },
	{ FW_FEATURE_SPRG0,	"hcall-sprg0" },
	{ FW_FEATURE_DABR,	"hcall-dabr" },
	{ FW_FEATURE_COPY,	"hcall-copy" },
	{ FW_FEATURE_ASR,	"hcall-asr" },
	{ FW_FEATURE_DEBUG,	"hcall-debug" },
	{ FW_FEATURE_PERF,	"hcall-perf" },
	{ FW_FEATURE_DUMP,	"hcall-dump" },
	{ FW_FEATURE_INTERRUPT,	"hcall-interrupt" },
	{ FW_FEATURE_MIGRATE,	"hcall-migrate" },
};
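The table is searched by the identify_cpu routine added to misc.S further down; in C terms the rule is "first entry whose masked PVR equals pvr_value wins", and the all-zero default entry guarantees the scan terminates. A sketch assuming the cpu_spec layout outlined earlier (the function name is invented):

        /* C rendering of the identify_cpu scan over cpu_specs[] */
        struct cpu_spec *identify_cpu_c(unsigned int pvr)
        {
                struct cpu_spec *s = cpu_specs;

                while ((pvr & s->pvr_mask) != s->pvr_value)
                        s++;    /* the {0, 0, ...} default entry always matches */
                cur_cpu_spec = s;
                return s;
        }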
arch/ppc64/kernel/head.S

@@ -34,6 +34,7 @@
 #include <asm/ppc_asm.h>
 #include <asm/offsets.h>
 #include <asm/bug.h>
+#include <asm/cputable.h>
 
 #ifdef CONFIG_PPC_ISERIES
 #define DO_SOFT_DISABLE

@@ -1267,6 +1268,11 @@ _GLOBAL(__start_initialization_iSeries)
 	li	r0,0
 	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
 
+	LOADADDR(r3,cpu_specs)
+	LOADADDR(r4,cur_cpu_spec)
+	li	r5,0
+	bl	.identify_cpu
+
 	LOADADDR(r2,__toc_start)
 	addi	r2,r2,0x4000
 	addi	r2,r2,0x4000

@@ -1730,6 +1736,13 @@ _STATIC(start_here_pSeries)
 	li	r0,0
 	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
 
+	LOADADDR(r3,cpu_specs)
+	sub	r3,r3,r26
+	LOADADDR(r4,cur_cpu_spec)
+	sub	r4,r4,r26
+	mr	r5,r26
+	bl	.identify_cpu
+
 	/* set up the TOC (physical address) */
 	LOADADDR(r2,__toc_start)
 	addi	r2,r2,0x4000

@@ -1888,6 +1901,11 @@ _STATIC(start_here_common)
 	bl	.start_kernel
 
+_GLOBAL(__setup_cpu_power3)
+	blr
+
+_GLOBAL(__setup_cpu_power4)
+	blr
+
 _GLOBAL(hmt_init)
#ifdef CONFIG_HMT
 	LOADADDR(r5, hmt_thread_data)
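Note the asymmetry between the two boot paths above: the iSeries entry passes a relocation offset of zero, while the pSeries path still executes at the kernel's physical load address, so it subtracts r26 (the relocation offset) from the table and pointer addresses before the call and passes the offset in r5; identify_cpu adds r5 back before its store, leaving cur_cpu_spec holding a pointer that is correct once the MMU is on.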
arch/ppc64/kernel/htab.c

@@ -46,6 +46,7 @@
 #include <asm/eeh.h>
 #include <asm/tlb.h>
 #include <asm/cacheflush.h>
+#include <asm/cputable.h>
 
 /*
  * Note: pte --> Linux PTE

@@ -165,7 +166,8 @@ htab_initialize(void)
 	mode_rw = _PAGE_ACCESSED | _PAGE_COHERENT | PP_RWXX;
 
 	/* XXX we currently map kernel text rw, should fix this */
-	if (cpu_has_largepage() && systemcfg->physicalMemorySize > 256*MB) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) &&
+	    systemcfg->physicalMemorySize > 256*MB) {
 		create_pte_mapping((unsigned long)KERNELBASE,
 				   KERNELBASE + 256*MB, mode_rw, 0);
 		create_pte_mapping((unsigned long)KERNELBASE + 256*MB,

@@ -279,7 +281,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
 #define PPC64_HWNOEXEC (1 << 2)
 
 	/* We do lazy icache flushing on cpus that support it */
-	if (unlikely(cpu_has_noexecute() && pfn_valid(pte_pfn(new_pte)))) {
+	if (unlikely((cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE) &&
+		     pfn_valid(pte_pfn(new_pte)))) {
 		struct page *page = pte_page(new_pte);
 
 		/* page is dirty */
arch/ppc64/kernel/iSeries_setup.c

@@ -33,6 +33,7 @@
 #include <asm/mmu.h>
 #include <asm/pgtable.h>
 #include <asm/mmu_context.h>
+#include <asm/cputable.h>
 
 #include <asm/time.h>
 #include "iSeries_setup.h"

@@ -254,7 +255,7 @@ unsigned long iSeries_process_mainstore_vpd( struct MemoryBlock *mb_array, unsig
 {
 	unsigned long i;
 	unsigned long mem_blocks = 0;
 
-	if (__is_processor(PV_POWER4) || __is_processor(PV_POWER4p))
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
 		mem_blocks = iSeries_process_Regatta_mainstore_vpd( mb_array, max_entries );
 	else
 		mem_blocks = iSeries_process_Condor_mainstore_vpd( mb_array, max_entries );
arch/ppc64/kernel/misc.S

@@ -27,6 +27,7 @@
 #include <asm/cache.h>
 #include <asm/ppc_asm.h>
 #include <asm/offsets.h>
+#include <asm/cputable.h>
 
 	.text

@@ -444,6 +445,95 @@ _GLOBAL(cvt_df)
 	stfd	0,0(r5)
 	blr
 
+/*
+ * identify_cpu,
+ * In:	r3 = base of the cpu_specs array
+ *	r4 = address of cur_cpu_spec
+ *	r5 = relocation offset
+ */
+_GLOBAL(identify_cpu)
+	mfpvr	r7
+1:
+	lwz	r8,CPU_SPEC_PVR_MASK(r3)
+	and	r8,r8,r7
+	lwz	r9,CPU_SPEC_PVR_VALUE(r3)
+	cmplw	0,r9,r8
+	beq	1f
+	addi	r3,r3,CPU_SPEC_ENTRY_SIZE
+	b	1b
+1:
+	add	r3,r3,r5
+	std	r3,0(r4)
+	blr
+
+/*
+ * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
+ * and writes nop's over sections of code that don't apply for this cpu.
+ * r3 = data offset (not changed)
+ */
+_GLOBAL(do_cpu_ftr_fixups)
+	/* Get CPU 0 features */
+	LOADADDR(r6,cur_cpu_spec)
+	sub	r6,r6,r3
+	ld	r4,0(r6)
+	sub	r4,r4,r3
+	ld	r4,CPU_SPEC_FEATURES(r4)
+	/* Get the fixup table */
+	LOADADDR(r6,__start___ftr_fixup)
+	sub	r6,r6,r3
+	LOADADDR(r7,__stop___ftr_fixup)
+	sub	r7,r7,r3
+	/* Do the fixup */
+1:	cmpld	r6,r7
+	bgelr
+	addi	r6,r6,32
+	ld	r8,-32(r6)	/* mask */
+	and	r8,r8,r4
+	ld	r9,-24(r6)	/* value */
+	cmpld	r8,r9
+	beq	1b
+	ld	r8,-16(r6)	/* section begin */
+	ld	r9,-8(r6)	/* section end */
+	subf.	r9,r8,r9
+	beq	1b
+	/* write nops over the section of code */
+	/* todo: if large section, add a branch at the start of it */
+	srwi	r9,r9,2
+	mtctr	r9
+	sub	r8,r8,r3
+	lis	r0,0x60000000@h	/* nop */
+3:	stw	r0,0(r8)
+	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
+	beq	2f
+	dcbst	0,r8		/* suboptimal, but simpler */
+	sync
+	icbi	0,r8
+2:	addi	r8,r8,4
+	bdnz	3b
+	sync			/* additional sync needed on g4 */
+	isync
+	b	1b
+
+/*
+ * call_setup_cpu - call the setup_cpu function for this cpu
+ * r3 = data offset
+ *
+ * Setup function is called with:
+ * r3 = data offset
+ * r4 = ptr to CPU spec (relocated)
+ */
+_GLOBAL(call_setup_cpu)
+	LOADADDR(r4, cur_cpu_spec)
+	sub	r4,r4,r3
+	lwz	r4,0(r4)		# load pointer to cpu_spec
+	sub	r4,r4,r3		# relocate
+	lwz	r6,CPU_SPEC_SETUP(r4)	# load function pointer
+	sub	r6,r6,r3
+	mtctr	r6
+	bctr
+
 /*
  * Create a kernel thread
  *   kernel_thread(fn, arg, flags)
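Each record do_cpu_ftr_fixups walks is 32 bytes, read back at offsets -32/-24/-16/-8 once r6 has been advanced: mask, value, section start, section end. A C sketch of the same pass, with the record layout inferred from those offsets and the struct/function names invented; the dcache/icache flush the asm performs after each store is elided:

        /* Inferred layout of one __ftr_fixup record (4 doublewords = 32 bytes) */
        struct ftr_fixup_entry {
                unsigned long   mask;   /* CPU_FTR_* bits the patch keys on */
                unsigned long   value;  /* keep code if (features & mask) == value */
                unsigned long   start;  /* first instruction of the section */
                unsigned long   end;    /* one past its last instruction */
        };

        extern struct ftr_fixup_entry __start___ftr_fixup[], __stop___ftr_fixup[];

        static void do_cpu_ftr_fixups_c(unsigned long features)
        {
                struct ftr_fixup_entry *e;
                unsigned int *p;

                for (e = __start___ftr_fixup; e < __stop___ftr_fixup; e++) {
                        if ((features & e->mask) == e->value)
                                continue;       /* section applies: leave it alone */
                        for (p = (unsigned int *)e->start;
                             p < (unsigned int *)e->end; p++)
                                *p = 0x60000000;        /* ppc nop (ori r0,r0,0) */
                }
        }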
arch/ppc64/kernel/pSeries_htab.c

@@ -21,6 +21,7 @@
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
+#include <asm/cputable.h>
 
 #define HPTE_LOCK_BIT 3

@@ -217,7 +218,7 @@ static long pSeries_hpte_updatepp(unsigned long slot, unsigned long newpp,
 	}
 
 	/* Ensure it is out of the tlb too */
-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		_tlbiel(va);
 	} else {
 		spin_lock_irqsave(&pSeries_tlbie_lock, flags);

@@ -283,7 +284,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
 	}
 
 	/* Invalidate the tlb */
-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		_tlbiel(va);
 	} else {
 		spin_lock_irqsave(&pSeries_tlbie_lock, flags);

@@ -346,7 +347,7 @@ static void pSeries_flush_hash_range(unsigned long context,
 		j++;
 	}
 
-	if (cpu_has_tlbiel() && !large && local) {
+	if ((cur_cpu_spec->cpu_features & CPU_FTR_TLBIEL) && !large && local) {
 		asm volatile("ptesync":::"memory");
 		for (i = 0; i < j; i++) {
arch/ppc64/kernel/process.c

@@ -45,6 +45,7 @@
 #include <asm/machdep.h>
 #include <asm/iSeries/HvCallHpt.h>
 #include <asm/hardirq.h>
+#include <asm/cputable.h>
 
 struct task_struct *last_task_used_math = NULL;

@@ -412,7 +413,7 @@ void initialize_paca_hardware_interrupt_stack(void)
 	 * __get_free_pages() might give us a page > KERNBASE+256M which
 	 * is mapped with large ptes so we can't set up the guard page.
 	 */
-	if (cpu_has_largepage())
+	if (cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE)
 		return;
 
 	for (i=0; i < NR_CPUS; i++) {
arch/ppc64/kernel/smp.c

@@ -47,6 +47,7 @@
 #include "open_pic.h"
 #include <asm/machdep.h>
 #include <asm/xics.h>
+#include <asm/cputable.h>
 
 int smp_threads_ready;
 unsigned long cache_decay_ticks;

@@ -583,7 +584,7 @@ int __devinit __cpu_up(unsigned int cpu)
 	paca[cpu].prof_multiplier = 1;
 	paca[cpu].default_decr = tb_ticks_per_jiffy / decr_overclock;
 
-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		void *tmp;
 
 		/* maximum of 48 CPUs on machines with a segment table */
arch/ppc64/kernel/stab.c

@@ -21,6 +21,7 @@
 #include <asm/paca.h>
 #include <asm/naca.h>
 #include <asm/pmc.h>
+#include <asm/cputable.h>
 
 int make_ste(unsigned long stab, unsigned long esid, unsigned long vsid);
 void make_slbe(unsigned long esid, unsigned long vsid, int large,

@@ -38,7 +39,7 @@ void stab_initialize(unsigned long stab)
 	esid = GET_ESID(KERNELBASE);
 	vsid = get_kernel_vsid(esid << SID_SHIFT);
 
-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 		/* Invalidate the entire SLB & all the ERATS */
 #ifdef CONFIG_PPC_ISERIES
 		asm volatile("isync; slbia; isync":::"memory");

@@ -222,7 +223,7 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large,
 static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
 				  int kernel_segment)
 {
-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 #ifndef CONFIG_PPC_ISERIES
 		if (REGION_ID(esid << SID_SHIFT) == KERNEL_REGION_ID)
 			make_slbe(esid, vsid, 1, kernel_segment);

@@ -275,7 +276,7 @@ int ste_allocate(unsigned long ea)
 	esid = GET_ESID(ea);
 	__ste_allocate(esid, vsid, kernel_segment);
 
-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		/* Order update */
 		asm volatile("sync":::"memory");
 	}

@@ -327,7 +328,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 		}
 	}
 
-	if (!cpu_has_slb()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		/* Order update */
 		asm volatile("sync" : : : "memory");
 	}

@@ -336,7 +337,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 /* Flush all user entries from the segment table of the current processor. */
 void flush_stab(struct task_struct *tsk, struct mm_struct *mm)
 {
-	if (cpu_has_slb()) {
+	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
 		/*
 		 * XXX disable 32bit slb invalidate optimisation until we fix
 		 * the issue where a 32bit app execed out of a 64bit app can
arch/ppc64/mm/init.c

@@ -59,6 +59,7 @@
 #include <asm/eeh.h>
 #include <asm/processor.h>
 #include <asm/mmzone.h>
+#include <asm/cputable.h>
 #include <asm/ppcdebug.h>

@@ -699,7 +700,7 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
 	int local = 0;
 
 	/* handle i-cache coherency */
-	if (!cpu_has_noexecute()) {
+	if (!(cur_cpu_spec->cpu_features & CPU_FTR_NOEXECUTE)) {
 		unsigned long pfn = pte_pfn(pte);
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
arch/ppc64/vmlinux.lds.S

@@ -65,6 +65,10 @@ SECTIONS
   __ex_table : { *(__ex_table) }
   __stop___ex_table = .;
 
+  __start___ftr_fixup = .;
+  __ftr_fixup : { *(__ftr_fixup) }
+  __stop___ftr_fixup = .;
+
   . = ALIGN(16384);	/* init_task */
   .data.init_task : { *(.data.init_task) }
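The new output section is where the {mask, value, start, end} records live; do_cpu_ftr_fixups walks it between the two bracketing symbols. On 32-bit PPC of the same era those records were emitted by BEGIN_FTR_SECTION/END_FTR_SECTION macros in asm/cputable.h; since ppc64's cputable.h is not part of this diff, the usage below assumes the 64-bit macros mirror them (the body line is a placeholder):

        BEGIN_FTR_SECTION		/* label the start of the section */
        	<cpu-specific code>	/* patched to nops when the test fails */
        END_FTR_SECTION(CPU_FTR_SPLIT_ID_CACHE, 0)	/* emits one record
        						 * into __ftr_fixup */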
include/asm-ppc64/processor.h

@@ -730,18 +730,6 @@ static inline void prefetchw(const void *x)
 #define spin_lock_prefetch(x)	prefetchw(x)
 
-#define cpu_has_largepage()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-#define cpu_has_slb()		(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-#define cpu_has_tlbiel()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-#define cpu_has_noexecute()	(processor_type() == PV_POWER4 || \
-				 processor_type() == PV_POWER4p)
-
 /* XXX we have to call HV to set when in LPAR */
 #define cpu_has_dabr()		(1)
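The removals above are the point of the whole commit: the per-model macros hard-wired every caller to the POWER4 PVR list, so supporting a new CPU meant touching every test site; now a new model needs only a cpu_specs[] entry with the right CPU_FTR_* bits. The two idioms side by side, both taken from the diffs above:

        /* before: call sites enumerate processor types via processor.h macros */
        if (cpu_has_slb())
                ...

        /* after: one feature-bit test against the entry identify_cpu matched */
        if (cur_cpu_spec->cpu_features & CPU_FTR_SLB)
                ...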