Commit ddbefe7e
Authored Nov 24, 2016 by Michael Ellerman
Merge branch 'topic/ppc-kvm' into next
Merge the topic branch we're sharing with the kvm-ppc tree.
Parents: 3382a622 02ed21ae

Showing 13 changed files with 138 additions and 53 deletions (+138, -53)
arch/powerpc/include/asm/book3s/64/mmu-hash.h    +39  -8
arch/powerpc/include/asm/mmu.h                    +5  -0
arch/powerpc/include/asm/opal.h                   +3  -0
arch/powerpc/include/asm/reg.h                   +15  -0
arch/powerpc/kernel/cpu_setup_power.S             +1  -1
arch/powerpc/mm/hash_native_64.c                 +24  -6
arch/powerpc/mm/hash_utils_64.c                   +4 -24
arch/powerpc/mm/pgtable-radix.c                   +6 -12
arch/powerpc/mm/pgtable_64.c                     +34  -0
arch/powerpc/platforms/powernv/opal-wrappers.S    +3  -0
arch/powerpc/platforms/powernv/opal.c             +2  -0
arch/powerpc/platforms/ps3/htab.c                 +1  -1
arch/powerpc/platforms/pseries/lpar.c             +1  -1
arch/powerpc/include/asm/book3s/64/mmu-hash.h

@@ -70,7 +70,9 @@
 #define HPTE_V_SSIZE_SHIFT	62
 #define HPTE_V_AVPN_SHIFT	7
+#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
 #define HPTE_V_AVPN		ASM_CONST(0x3fffffffffffff80)
+#define HPTE_V_AVPN_3_0		ASM_CONST(0x000fffffffffff80)
 #define HPTE_V_AVPN_VAL(x)	(((x) & HPTE_V_AVPN) >> HPTE_V_AVPN_SHIFT)
 #define HPTE_V_COMPARE(x,y)	(!(((x) ^ (y)) & 0xffffffffffffff80UL))
 #define HPTE_V_BOLTED		ASM_CONST(0x0000000000000010)
@@ -80,14 +82,16 @@
 #define HPTE_V_VALID		ASM_CONST(0x0000000000000001)

 /*
- * ISA 3.0 have a different HPTE format.
+ * ISA 3.0 has a different HPTE format.
  */
 #define HPTE_R_3_0_SSIZE_SHIFT	58
+#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)
 #define HPTE_R_PP0		ASM_CONST(0x8000000000000000)
 #define HPTE_R_TS		ASM_CONST(0x4000000000000000)
 #define HPTE_R_KEY_HI		ASM_CONST(0x3000000000000000)
 #define HPTE_R_RPN_SHIFT	12
 #define HPTE_R_RPN		ASM_CONST(0x0ffffffffffff000)
+#define HPTE_R_RPN_3_0		ASM_CONST(0x01fffffffffff000)
 #define HPTE_R_PP		ASM_CONST(0x0000000000000003)
 #define HPTE_R_PPP		ASM_CONST(0x8000000000000003)
 #define HPTE_R_N		ASM_CONST(0x0000000000000004)
@@ -316,11 +320,42 @@ static inline unsigned long hpte_encode_avpn(unsigned long vpn, int psize,
 	 */
 	v = (vpn >> (23 - VPN_SHIFT)) & ~(mmu_psize_defs[psize].avpnm);
 	v <<= HPTE_V_AVPN_SHIFT;
-	if (!cpu_has_feature(CPU_FTR_ARCH_300))
-		v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
+	v |= ((unsigned long) ssize) << HPTE_V_SSIZE_SHIFT;
 	return v;
 }

+/*
+ * ISA v3.0 defines a new HPTE format, which differs from the old
+ * format in having smaller AVPN and ARPN fields, and the B field
+ * in the second dword instead of the first.
+ */
+static inline unsigned long hpte_old_to_new_v(unsigned long v)
+{
+	/* trim AVPN, drop B */
+	return v & HPTE_V_COMMON_BITS;
+}
+
+static inline unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
+{
+	/* move B field from 1st to 2nd dword, trim ARPN */
+	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
+	       (((v) >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
+}
+
+static inline unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
+{
+	/* insert B field */
+	return (v & HPTE_V_COMMON_BITS) |
+	       ((r & HPTE_R_3_0_SSIZE_MASK) <<
+		(HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
+}
+
+static inline unsigned long hpte_new_to_old_r(unsigned long r)
+{
+	/* clear out B field */
+	return r & ~HPTE_R_3_0_SSIZE_MASK;
+}
+
 /*
  * This function sets the AVPN and L fields of the HPTE appropriately
  * using the base page size and actual page size.
@@ -341,12 +376,8 @@ static inline unsigned long hpte_encode_v(unsigned long vpn, int base_psize,
  * aligned for the requested page size
  */
 static inline unsigned long hpte_encode_r(unsigned long pa, int base_psize,
-					  int actual_psize, int ssize)
+					  int actual_psize)
 {
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		pa |= ((unsigned long) ssize) << HPTE_R_3_0_SSIZE_SHIFT;
-
 	/* A 4K page needs no special encoding */
 	if (actual_psize == MMU_PAGE_4K)
 		return pa & HPTE_R_RPN;
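The conversion helpers added above are pure bit manipulation, so their effect can be checked outside the kernel. Below is a minimal standalone sketch, not part of the patch: the constants and helper bodies are copied from the hunk above, and the sample HPTE values (ssize = 1, i.e. a 1T segment, plus arbitrary AVPN/RPN bits) are illustrative assumptions. It verifies that an old-format HPTE survives a round trip through the ISA v3.0 layout.

/*
 * Standalone sketch: round-trip an old-format HPTE through the
 * ISA v3.0 layout. Constants copied from the hunk above; the test
 * values are made up for illustration.
 */
#include <assert.h>
#include <stdio.h>

#define ASM_CONST(x)		x##UL
#define HPTE_V_SSIZE_SHIFT	62
#define HPTE_V_COMMON_BITS	ASM_CONST(0x000fffffffffffff)
#define HPTE_R_3_0_SSIZE_SHIFT	58
#define HPTE_R_3_0_SSIZE_MASK	(3ull << HPTE_R_3_0_SSIZE_SHIFT)

static unsigned long hpte_old_to_new_v(unsigned long v)
{
	return v & HPTE_V_COMMON_BITS;		/* trim AVPN, drop B */
}

static unsigned long hpte_old_to_new_r(unsigned long v, unsigned long r)
{
	/* move B field from 1st to 2nd dword */
	return (r & ~HPTE_R_3_0_SSIZE_MASK) |
	       ((v >> HPTE_V_SSIZE_SHIFT) << HPTE_R_3_0_SSIZE_SHIFT);
}

static unsigned long hpte_new_to_old_v(unsigned long v, unsigned long r)
{
	/* re-insert B field into dword 0 */
	return (v & HPTE_V_COMMON_BITS) |
	       ((r & HPTE_R_3_0_SSIZE_MASK) <<
		(HPTE_V_SSIZE_SHIFT - HPTE_R_3_0_SSIZE_SHIFT));
}

static unsigned long hpte_new_to_old_r(unsigned long r)
{
	return r & ~HPTE_R_3_0_SSIZE_MASK;	/* clear out B field */
}

int main(void)
{
	/* old format: B (ssize) = 1 in the top bits, some AVPN/flag bits set */
	unsigned long old_v = (1UL << HPTE_V_SSIZE_SHIFT) | 0x0000123456789080UL;
	unsigned long old_r = 0x000000000fedc003UL;

	unsigned long new_r = hpte_old_to_new_r(old_v, old_r);
	unsigned long new_v = hpte_old_to_new_v(old_v);

	assert(hpte_new_to_old_v(new_v, new_r) == old_v);
	assert(hpte_new_to_old_r(new_r) == old_r);
	printf("old v=%016lx r=%016lx  new v=%016lx r=%016lx\n",
	       old_v, old_r, new_v, new_r);
	return 0;
}

The round trip is lossless as long as the old-format dword 0 only uses the common bits plus the B field, which is exactly the property the rest of this commit relies on when it converts at the hash-table access layer.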
arch/powerpc/include/asm/mmu.h

@@ -208,6 +208,11 @@ extern u64 ppc64_rma_size;
 /* Cleanup function used by kexec */
 extern void mmu_cleanup_all(void);
 extern void radix__mmu_cleanup_all(void);
+
+/* Functions for creating and updating partition table on POWER9 */
+extern void mmu_partition_table_init(void);
+extern void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+					  unsigned long dw1);
 #endif /* CONFIG_PPC64 */

 struct mm_struct;
arch/powerpc/include/asm/opal.h

@@ -220,9 +220,12 @@ int64_t opal_pci_set_power_state(uint64_t async_token, uint64_t id,
 int64_t opal_pci_poll2(uint64_t id, uint64_t data);

 int64_t opal_int_get_xirr(uint32_t *out_xirr, bool just_poll);
+int64_t opal_rm_int_get_xirr(__be32 *out_xirr, bool just_poll);
 int64_t opal_int_set_cppr(uint8_t cppr);
 int64_t opal_int_eoi(uint32_t xirr);
+int64_t opal_rm_int_eoi(uint32_t xirr);
 int64_t opal_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
+int64_t opal_rm_int_set_mfrr(uint32_t cpu, uint8_t mfrr);
 int64_t opal_pci_tce_kill(uint64_t phb_id, uint32_t kill_type,
			   uint32_t pe_num, uint32_t tce_size,
			   uint64_t dma_addr, uint32_t npages);
arch/powerpc/include/asm/reg.h

@@ -153,6 +153,8 @@
 #define PSSCR_EC		0x00100000 /* Exit Criterion */
 #define PSSCR_ESL		0x00200000 /* Enable State Loss */
 #define PSSCR_SD		0x00400000 /* Status Disable */
+#define PSSCR_PLS		0xf000000000000000 /* Power-saving Level Status */
+#define PSSCR_GUEST_VIS		0xf0000000000003ff /* Guest-visible PSSCR fields */

 /* Floating Point Status and Control Register (FPSCR) Fields */
 #define FPSCR_FX	0x80000000	/* FPU exception summary */
@@ -236,6 +238,7 @@
 #define SPRN_TEXASRU	0x83	/* '' '' '' Upper 32 */
 #define   TEXASR_FS	__MASK(63-36) /* TEXASR Failure Summary */
 #define SPRN_TFHAR	0x80	/* Transaction Failure Handler Addr */
+#define SPRN_TIDR	144	/* Thread ID register */
 #define SPRN_CTRLF	0x088
 #define SPRN_CTRLT	0x098
 #define   CTRL_CT	0xc0000000	/* current thread */
@@ -292,6 +295,7 @@
 #define SPRN_HRMOR	0x139	/* Real mode offset register */
 #define SPRN_HSRR0	0x13A	/* Hypervisor Save/Restore 0 */
 #define SPRN_HSRR1	0x13B	/* Hypervisor Save/Restore 1 */
+#define SPRN_ASDR	0x330	/* Access segment descriptor register */
 #define SPRN_IC		0x350	/* Virtual Instruction Count */
 #define SPRN_VTB	0x351	/* Virtual Time Base */
 #define SPRN_LDBAR	0x352	/* LD Base Address Register */
@@ -302,6 +306,7 @@
 #define SPRN_PMCR	0x374	/* Power Management Control Register */

 /* HFSCR and FSCR bit numbers are the same */
+#define FSCR_MSGP_LG	10	/* Enable MSGP */
 #define FSCR_TAR_LG	8	/* Enable Target Address Register */
 #define FSCR_EBB_LG	7	/* Enable Event Based Branching */
 #define FSCR_TM_LG	5	/* Enable Transactional Memory */
@@ -315,6 +320,7 @@
 #define   FSCR_EBB	__MASK(FSCR_EBB_LG)
 #define   FSCR_DSCR	__MASK(FSCR_DSCR_LG)
 #define SPRN_HFSCR	0xbe	/* HV=1 Facility Status & Control Register */
+#define   HFSCR_MSGP	__MASK(FSCR_MSGP_LG)
 #define   HFSCR_TAR	__MASK(FSCR_TAR_LG)
 #define   HFSCR_EBB	__MASK(FSCR_EBB_LG)
 #define   HFSCR_TM	__MASK(FSCR_TM_LG)
@@ -350,8 +356,10 @@
 #define     LPCR_PECE0		ASM_CONST(0x0000000000004000)	/* ext. exceptions can cause exit */
 #define     LPCR_PECE1		ASM_CONST(0x0000000000002000)	/* decrementer can cause exit */
 #define     LPCR_PECE2		ASM_CONST(0x0000000000001000)	/* machine check etc can cause exit */
+#define   LPCR_PECE_HVEE	ASM_CONST(0x0000400000000000)	/* P9 Wakeup on HV interrupts */
 #define   LPCR_MER		ASM_CONST(0x0000000000000800)	/* Mediated External Exception */
 #define   LPCR_MER_SH		11
+#define   LPCR_GTSE		ASM_CONST(0x0000000000000400)	/* Guest Translation Shootdown Enable */
 #define   LPCR_TC		ASM_CONST(0x0000000000000200)	/* Translation control */
 #define   LPCR_LPES		0x0000000c
 #define   LPCR_LPES0		ASM_CONST(0x0000000000000008)	/* LPAR Env selector 0 */
@@ -372,6 +380,12 @@
 #define   PCR_VEC_DIS	(1ul << (63-0))	/* Vec. disable (bit NA since POWER8) */
 #define   PCR_VSX_DIS	(1ul << (63-1))	/* VSX disable (bit NA since POWER8) */
 #define   PCR_TM_DIS	(1ul << (63-2))	/* Trans. memory disable (POWER8) */
+/*
+ * These bits are used in the function kvmppc_set_arch_compat() to specify and
+ * determine both the compatibility level which we want to emulate and the
+ * compatibility level which the host is capable of emulating.
+ */
+#define   PCR_ARCH_207	0x8		/* Architecture 2.07 */
 #define   PCR_ARCH_206	0x4		/* Architecture 2.06 */
 #define   PCR_ARCH_205	0x2		/* Architecture 2.05 */
 #define SPRN_HEIR	0x153	/* Hypervisor Emulated Instruction Register */
@@ -1213,6 +1227,7 @@
 #define PVR_ARCH_206	0x0f000003
 #define PVR_ARCH_206p	0x0f100003
 #define PVR_ARCH_207	0x0f000004
+#define PVR_ARCH_300	0x0f000005

 /* Macros for setting and retrieving special purpose registers */
 #ifndef __ASSEMBLY__
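For reference, the HFSCR bit macros expand through __MASK. A short worked expansion, assuming __MASK(X) is (1UL << (X)) as defined elsewhere in reg.h (that definition is not part of this hunk):

/* Illustrative only: where the new MSGP bit lands in the HFSCR value. */
#define FSCR_MSGP_LG	10			/* from the hunk above */
#define __MASK(X)	(1UL << (X))		/* assumed reg.h definition */
#define HFSCR_MSGP	__MASK(FSCR_MSGP_LG)	/* == 0x0000000000000400 */

The cpu_setup_power.S change below then simply ORs this bit into the HFSCR at boot, alongside the facilities that were already enabled.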
arch/powerpc/kernel/cpu_setup_power.S

@@ -174,7 +174,7 @@ __init_FSCR:
 __init_HFSCR:
 	mfspr	r3,SPRN_HFSCR
 	ori	r3,r3,HFSCR_TAR|HFSCR_TM|HFSCR_BHRB|HFSCR_PM|\
-		      HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB
+		      HFSCR_DSCR|HFSCR_VECVSX|HFSCR_FP|HFSCR_EBB|HFSCR_MSGP
 	mtspr	SPRN_HFSCR,r3
 	blr
arch/powerpc/mm/hash_native_64.c

@@ -223,13 +223,18 @@ static long native_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 		return -1;

 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
+	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

 	if (!(vflags & HPTE_V_BOLTED)) {
 		DBG_LOW(" i=%x hpte_v=%016lx, hpte_r=%016lx\n",
 			i, hpte_v, hpte_r);
 	}

+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hpte_r = hpte_old_to_new_r(hpte_v, hpte_r);
+		hpte_v = hpte_old_to_new_v(hpte_v);
+	}
+
 	hptep->r = cpu_to_be64(hpte_r);
 	/* Guarantee the second dword is visible before the valid bit */
 	eieio();
@@ -297,6 +302,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		vpn, want_v & HPTE_V_AVPN, slot, newpp);

 	hpte_v = be64_to_cpu(hptep->v);
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
 	 * a tlb invalidate. If a hash bucket gets full, we "evict" a more/less
@@ -311,6 +318,8 @@ static long native_hpte_updatepp(unsigned long slot, unsigned long newpp,
 		native_lock_hpte(hptep);
 		/* recheck with locks held */
 		hpte_v = be64_to_cpu(hptep->v);
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));
 		if (unlikely(!HPTE_V_COMPARE(hpte_v, want_v) ||
 			     !(hpte_v & HPTE_V_VALID))) {
 			ret = -1;
@@ -352,6 +361,8 @@ static long native_hpte_find(unsigned long vpn, int psize, int ssize)
 	for (i = 0; i < HPTES_PER_GROUP; i++) {
 		hptep = htab_address + slot;
 		hpte_v = be64_to_cpu(hptep->v);
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

 		if (HPTE_V_COMPARE(hpte_v, want_v) && (hpte_v & HPTE_V_VALID))
 			/* HPTE matches */
@@ -411,6 +422,8 @@ static void native_hpte_invalidate(unsigned long slot, unsigned long vpn,
 	want_v = hpte_encode_avpn(vpn, bpsize, ssize);
 	native_lock_hpte(hptep);
 	hpte_v = be64_to_cpu(hptep->v);
+	if (cpu_has_feature(CPU_FTR_ARCH_300))
+		hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

 	/*
 	 * We need to invalidate the TLB always because hpte_remove doesn't do
@@ -469,6 +482,8 @@ static void native_hugepage_invalidate(unsigned long vsid,
 		want_v = hpte_encode_avpn(vpn, psize, ssize);
 		native_lock_hpte(hptep);
 		hpte_v = be64_to_cpu(hptep->v);
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			hpte_v = hpte_new_to_old_v(hpte_v, be64_to_cpu(hptep->r));

 		/* Even if we miss, we need to invalidate the TLB */
 		if (!HPTE_V_COMPARE(hpte_v, want_v) || !(hpte_v & HPTE_V_VALID))
@@ -506,6 +521,10 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 	/* Look at the 8 bit LP value */
 	unsigned int lp = (hpte_r >> LP_SHIFT) & ((1 << LP_BITS) - 1);

+	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
+		hpte_v = hpte_new_to_old_v(hpte_v, hpte_r);
+		hpte_r = hpte_new_to_old_r(hpte_r);
+	}
 	if (!(hpte_v & HPTE_V_LARGE)) {
 		size   = MMU_PAGE_4K;
 		a_size = MMU_PAGE_4K;
@@ -514,11 +533,7 @@ static void hpte_decode(struct hash_pte *hpte, unsigned long slot,
 		a_size = hpte_page_sizes[lp] >> 4;
 	}
 	/* This works for all page sizes, and for 256M and 1T segments */
-	if (cpu_has_feature(CPU_FTR_ARCH_300))
-		*ssize = hpte_r >> HPTE_R_3_0_SSIZE_SHIFT;
-	else
-		*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
-
+	*ssize = hpte_v >> HPTE_V_SSIZE_SHIFT;
 	shift = mmu_psize_defs[size].shift;

 	avpn = (HPTE_V_AVPN_VAL(hpte_v) & ~mmu_psize_defs[size].avpnm);
@@ -641,6 +656,9 @@ static void native_flush_hash_range(unsigned long number, int local)
 		want_v = hpte_encode_avpn(vpn, psize, ssize);
 		native_lock_hpte(hptep);
 		hpte_v = be64_to_cpu(hptep->v);
+		if (cpu_has_feature(CPU_FTR_ARCH_300))
+			hpte_v = hpte_new_to_old_v(hpte_v,
+					be64_to_cpu(hptep->r));
 		if (!HPTE_V_COMPARE(hpte_v, want_v) ||
 		    !(hpte_v & HPTE_V_VALID))
 			native_unlock_hpte(hptep);
arch/powerpc/mm/hash_utils_64.c

@@ -792,37 +792,17 @@ static void update_hid_for_hash(void)
 static void __init hash_init_partition_table(phys_addr_t hash_table,
 					     unsigned long htab_size)
 {
-	unsigned long ps_field;
-	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
+	mmu_partition_table_init();

 	/*
-	 * slb llp encoding for the page size used in VPM real mode.
-	 * We can ignore that for lpid 0
+	 * PS field (VRMA page size) is not used for LPID 0, hence set to 0.
+	 * For now, UPRT is 0 and we have no segment table.
 	 */
-	ps_field = 0;
 	htab_size =  __ilog2(htab_size) - 18;
-
-	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 24), "Partition table size too large.");
-	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
-						MEMBLOCK_ALLOC_ANYWHERE));
-
-	/* Initialize the Partition Table with no entries */
-	memset((void *)partition_tb, 0, patb_size);
-	partition_tb->patb0 = cpu_to_be64(ps_field | hash_table | htab_size);
-	/*
-	 * FIXME!! This should be done via update_partition table
-	 * For now UPRT is 0 for us.
-	 */
-	partition_tb->patb1 = 0;
-
+	mmu_partition_table_set_entry(0, hash_table | htab_size, 0);
 	pr_info("Partition table %p\n", partition_tb);
 	if (cpu_has_feature(CPU_FTR_POWER9_DD1))
 		update_hid_for_hash();
-	/*
-	 * update partition table control register,
-	 * 64 K size.
-	 */
-	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
 }

 static void __init htab_initialize(void)
arch/powerpc/mm/pgtable-radix.c

@@ -177,23 +177,15 @@ static void __init radix_init_pgtable(void)
 static void __init radix_init_partition_table(void)
 {
-	unsigned long rts_field;
+	unsigned long rts_field, dw0;

+	mmu_partition_table_init();
 	rts_field = radix__get_tree_size();
+	dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
+	mmu_partition_table_set_entry(0, dw0, 0);

-	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
-	partition_tb = early_alloc_pgtable(1UL << PATB_SIZE_SHIFT);
-	partition_tb->patb0 = cpu_to_be64(rts_field | __pa(init_mm.pgd) |
-					  RADIX_PGD_INDEX_SIZE | PATB_HR);
 	pr_info("Initializing Radix MMU\n");
 	pr_info("Partition table %p\n", partition_tb);
-
-	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
-	/*
-	 * update partition table control register,
-	 * 64 K size.
-	 */
-	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
 }

 void __init radix_init_native(void)
@@ -378,6 +370,8 @@ void __init radix__early_init_mmu(void)
 		radix_init_partition_table();
 	}

+	memblock_set_current_limit(MEMBLOCK_ALLOC_ANYWHERE);
+
 	radix_init_pgtable();
 }
arch/powerpc/mm/pgtable_64.c

@@ -431,3 +431,37 @@ void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift)
 	}
 }
 #endif
+
+#ifdef CONFIG_PPC_BOOK3S_64
+void __init mmu_partition_table_init(void)
+{
+	unsigned long patb_size = 1UL << PATB_SIZE_SHIFT;
+
+	BUILD_BUG_ON_MSG((PATB_SIZE_SHIFT > 36), "Partition table size too large.");
+	partition_tb = __va(memblock_alloc_base(patb_size, patb_size,
+						MEMBLOCK_ALLOC_ANYWHERE));
+
+	/* Initialize the Partition Table with no entries */
+	memset((void *)partition_tb, 0, patb_size);
+
+	/*
+	 * update partition table control register,
+	 * 64 K size.
+	 */
+	mtspr(SPRN_PTCR, __pa(partition_tb) | (PATB_SIZE_SHIFT - 12));
+}
+
+void mmu_partition_table_set_entry(unsigned int lpid, unsigned long dw0,
+				   unsigned long dw1)
+{
+	partition_tb[lpid].patb0 = cpu_to_be64(dw0);
+	partition_tb[lpid].patb1 = cpu_to_be64(dw1);
+
+	/* Global flush of TLBs and partition table caches for this lpid */
+	asm volatile("ptesync" : : : "memory");
+	asm volatile(PPC_TLBIE_5(%0,%1,2,0,0) : :
+		     "r" (TLBIEL_INVAL_SET_LPID), "r" (lpid));
+	asm volatile("eieio; tlbsync; ptesync" : : : "memory");
+}
+EXPORT_SYMBOL_GPL(mmu_partition_table_set_entry);
+#endif /* CONFIG_PPC_BOOK3S_64 */
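These two helpers are the common path used by the hash_utils_64.c and pgtable-radix.c hunks earlier in this diff: each MMU flavour calls mmu_partition_table_init() once and then installs its LPID 0 entry. A condensed view of those call sites, copied from the hunks above:

/* hash: dword 0 carries the HTAB base and encoded size, dword 1 stays 0 */
mmu_partition_table_init();
mmu_partition_table_set_entry(0, hash_table | htab_size, 0);

/* radix: dword 0 carries the tree size, PGD address and PATB_HR, dword 1 stays 0 */
mmu_partition_table_init();
dw0 = rts_field | __pa(init_mm.pgd) | RADIX_PGD_INDEX_SIZE | PATB_HR;
mmu_partition_table_set_entry(0, dw0, 0);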
arch/powerpc/platforms/powernv/opal-wrappers.S

@@ -304,8 +304,11 @@ OPAL_CALL(opal_pci_get_presence_state,	OPAL_PCI_GET_PRESENCE_STATE);
 OPAL_CALL(opal_pci_get_power_state,		OPAL_PCI_GET_POWER_STATE);
 OPAL_CALL(opal_pci_set_power_state,		OPAL_PCI_SET_POWER_STATE);
 OPAL_CALL(opal_int_get_xirr,			OPAL_INT_GET_XIRR);
+OPAL_CALL_REAL(opal_rm_int_get_xirr,		OPAL_INT_GET_XIRR);
 OPAL_CALL(opal_int_set_cppr,			OPAL_INT_SET_CPPR);
 OPAL_CALL(opal_int_eoi,				OPAL_INT_EOI);
+OPAL_CALL_REAL(opal_rm_int_eoi,			OPAL_INT_EOI);
 OPAL_CALL(opal_int_set_mfrr,			OPAL_INT_SET_MFRR);
+OPAL_CALL_REAL(opal_rm_int_set_mfrr,		OPAL_INT_SET_MFRR);
 OPAL_CALL(opal_pci_tce_kill,			OPAL_PCI_TCE_KILL);
 OPAL_CALL_REAL(opal_rm_pci_tce_kill,		OPAL_PCI_TCE_KILL);
arch/powerpc/platforms/powernv/opal.c

@@ -886,3 +886,5 @@ EXPORT_SYMBOL_GPL(opal_leds_get_ind);
 EXPORT_SYMBOL_GPL(opal_leds_set_ind);
 /* Export this symbol for PowerNV Operator Panel class driver */
 EXPORT_SYMBOL_GPL(opal_write_oppanel_async);
+/* Export this for KVM */
+EXPORT_SYMBOL_GPL(opal_int_set_mfrr);
arch/powerpc/platforms/ps3/htab.c

@@ -63,7 +63,7 @@ static long ps3_hpte_insert(unsigned long hpte_group, unsigned long vpn,
 	vflags &= ~HPTE_V_SECONDARY;

 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize, ssize) | rflags;
+	hpte_r = hpte_encode_r(ps3_mm_phys_to_lpar(pa), psize, apsize) | rflags;

 	spin_lock_irqsave(&ps3_htab_lock, flags);
arch/powerpc/platforms/pseries/lpar.c

@@ -145,7 +145,7 @@ static long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 		 hpte_group, vpn,  pa, rflags, vflags, psize);

 	hpte_v = hpte_encode_v(vpn, psize, apsize, ssize) | vflags | HPTE_V_VALID;
-	hpte_r = hpte_encode_r(pa, psize, apsize, ssize) | rflags;
+	hpte_r = hpte_encode_r(pa, psize, apsize) | rflags;

 	if (!(vflags & HPTE_V_BOLTED))
 		pr_devel(" hpte_v=%016lx, hpte_r=%016lx\n", hpte_v, hpte_r);