nexedi / linux / Commits / a6ee6a31

Commit a6ee6a31 authored Sep 09, 2003 by Anton Blanchard

Merge samba.org:/scratch/anton/export into samba.org:/scratch/anton/tmp3

parents 5e7bd347 32737e43
Showing 13 changed files with 1047 additions and 63 deletions (+1047, -63)
arch/ppc64/Kconfig                  +11   -0
arch/ppc64/kernel/htab.c             +7   -3
arch/ppc64/kernel/pSeries_lpar.c     +7  -34
arch/ppc64/kernel/stab.c            +18  -12
arch/ppc64/kernel/xics.c             +4   -4
arch/ppc64/mm/Makefile               +1   -0
arch/ppc64/mm/hugetlbpage.c        +925   -0
arch/ppc64/mm/init.c                 +4   -2
include/asm-ppc64/hvcall.h           +0   -3
include/asm-ppc64/mmu.h              +6   -0
include/asm-ppc64/mmu_context.h      +4   -1
include/asm-ppc64/page.h            +33   -0
include/asm-ppc64/pgtable.h         +27   -4
arch/ppc64/Kconfig

@@ -69,6 +69,17 @@ config PPC64
 	bool
 	default y
 
+config HUGETLB_PAGE
+	bool "Huge TLB Page Support"
+	help
+	  This enables support for huge pages.  User space applications
+	  can make use of this support with the sys_alloc_hugepages and
+	  sys_free_hugepages system calls.  If your applications are
+	  huge page aware and your processor supports this (only POWER4),
+	  then say Y here.
+
+	  Otherwise, say N.
+
 config SMP
 	bool "Symmetric multi-processing support"
 	---help---
arch/ppc64/kernel/htab.c

@@ -197,7 +197,7 @@ pte_t *find_linux_pte(pgd_t *pgdir, unsigned long ea)
 	if (!pgd_none(*pg)) {
 		pm = pmd_offset(pg, ea);
-		if (!pmd_none(*pm)) {
+		if (pmd_present(*pm)) {
 			pt = pte_offset_kernel(pm, ea);
 			pte = *pt;
 			if (!pte_present(pte))

@@ -436,8 +436,12 @@ int hash_page(unsigned long ea, unsigned long access, unsigned long trap)
 	if (user_region && cpus_equal(mm->cpu_vm_mask, tmp))
 		local = 1;
 
-	ptep = find_linux_pte(pgdir, ea);
-	ret = __hash_page(ea, access, vsid, ptep, trap, local);
+	ret = hash_huge_page(mm, access, ea, vsid, local);
+	if (ret < 0) {
+		ptep = find_linux_pte(pgdir, ea);
+		ret = __hash_page(ea, access, vsid, ptep, trap, local);
+	}
 	spin_unlock(&mm->page_table_lock);
 
 	return ret;
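The new hash_page() flow tries the hugepage path first and only walks the normal page tables when hash_huge_page() signals that the address is not in a hugepage region (a negative return, matching the hash_huge_page() stub added to pgtable.h below). A minimal standalone sketch of that dispatch pattern; the my_* functions and the range check are illustrative stand-ins, not the kernel's code:

#include <stdio.h>

/* Illustrative stand-in: returns < 0 when the address is not in a
 * hugepage area, mirroring the hash_huge_page() contract in this commit. */
static int my_hash_huge_page(unsigned long ea)
{
	const unsigned long hpage_base = 0x0000010000000000UL; /* 1T, from page.h */
	const unsigned long hpage_end  = 0x0000018000000000UL; /* 1.5T */

	if (ea < hpage_base || ea >= hpage_end)
		return -1;	/* not a hugepage address: caller falls back */
	printf("hashed %#lx as a 16M hugepage\n", ea);
	return 0;
}

static int my_hash_normal_page(unsigned long ea)
{
	printf("hashed %#lx via the normal page tables\n", ea);
	return 0;
}

static int my_hash_page(unsigned long ea)
{
	int ret = my_hash_huge_page(ea);	/* try the hugepage path first */

	if (ret < 0)				/* fall back, as in hash_page() */
		ret = my_hash_normal_page(ea);
	return ret;
}

int main(void)
{
	my_hash_page(0x0000010000000000UL);	/* inside the 64-bit hugepage range */
	my_hash_page(0x0000000010000000UL);	/* ordinary mapping */
	return 0;
}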
arch/ppc64/kernel/pSeries_lpar.c

@@ -36,18 +36,6 @@
 #include <asm/tlb.h>
 #include <asm/hvcall.h>
 
-long plpar_pte_enter(unsigned long flags,
-		     unsigned long ptex,
-		     unsigned long new_pteh, unsigned long new_ptel,
-		     unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
-{
-	unsigned long dummy, ret;
-
-	ret = plpar_hcall(H_ENTER, flags, ptex, new_pteh, new_ptel,
-			  old_pteh_ret, old_ptel_ret, &dummy);
-	return(ret);
-}
-
 long plpar_pte_remove(unsigned long flags,
 		      unsigned long ptex,
 		      unsigned long avpn,

@@ -83,7 +71,6 @@ long plpar_tce_get(unsigned long liobn,
 			   tce_ret, &dummy, &dummy);
 }
 
-
 long plpar_tce_put(unsigned long liobn,
 		   unsigned long ioba,
 		   unsigned long tceval)

@@ -104,10 +91,9 @@ long plpar_put_term_char(unsigned long termno,
 			 unsigned long len,
 			 const char *buffer)
 {
-	unsigned long dummy;
 	unsigned long *lbuf = (unsigned long *)buffer;	/* ToDo: alignment? */
-	return plpar_hcall(H_PUT_TERM_CHAR, termno, len, lbuf[0], lbuf[1],
-			   &dummy, &dummy, &dummy);
+	return plpar_hcall_norets(H_PUT_TERM_CHAR, termno, len,
+				  lbuf[0], lbuf[1]);
 }
 
 static void tce_build_pSeriesLP(struct TceTable *tbl, long tcenum,

@@ -287,12 +273,11 @@ int hvc_get_chars(int index, char *buf, int count)
 int hvc_put_chars(int index, const char *buf, int count)
 {
-	unsigned long dummy;
 	unsigned long *lbuf = (unsigned long *) buf;
 	long ret;
 
-	ret = plpar_hcall(H_PUT_TERM_CHAR, index, count, lbuf[0], lbuf[1],
-			  &dummy, &dummy, &dummy);
+	ret = plpar_hcall_norets(H_PUT_TERM_CHAR, index, count,
+				 lbuf[0], lbuf[1]);
 	if (ret == H_Success)
 		return count;
 	if (ret == H_Busy)

@@ -318,7 +303,6 @@ int hvc_count(int *start_termno)
-
 long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 			      unsigned long va, unsigned long prpn,
 			      int secondary, unsigned long hpteflags,

@@ -329,6 +313,7 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	unsigned long flags;
 	unsigned long slot;
 	HPTE lhpte;
+	unsigned long dummy0, dummy1;
 
 	/* Fill in the local HPTE with absolute rpn, avpn and flags */
 	lhpte.dw1.dword1 = 0;

@@ -348,7 +333,6 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	/* Now fill in the actual HPTE */
 	/* Set CEC cookie to 0 */
-	/* Large page = 0 */
 	/* Zero page = 0 */
 	/* I-cache Invalidate = 0 */
 	/* I-cache synchronize = 0 */

@@ -359,19 +343,8 @@ long pSeries_lpar_hpte_insert(unsigned long hpte_group,
 	if (hpteflags & (_PAGE_GUARDED|_PAGE_NO_CACHE))
 		lhpte.dw1.flags.flags &= ~_PAGE_COHERENT;
 
-	__asm__ __volatile__ (
-		H_ENTER_r3
-		"mr    4, %2\n"
-		"mr    5, %3\n"
-		"mr    6, %4\n"
-		"mr    7, %5\n"
-		HSC
-		"mr    %0, 3\n"
-		"mr    %1, 4\n"
-		: "=r" (lpar_rc), "=r" (slot)
-		: "r" (flags), "r" (hpte_group), "r" (lhpte.dw0.dword0),
-		  "r" (lhpte.dw1.dword1)
-		: "r3", "r4", "r5", "r6", "r7", "cc");
+	lpar_rc = plpar_hcall(H_ENTER, flags, hpte_group,
+			      lhpte.dw0.dword0, lhpte.dw1.dword1,
+			      &slot, &dummy0, &dummy1);
 
 	if (lpar_rc == H_PTEG_Full)
 		return -1;
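This file replaces an open-coded H_ENTER hypervisor call (inline asm built from the now-removed HSC/H_ENTER_r3 macros) with plpar_hcall(): an opcode, four input words, and up to three outputs returned through pointers; plpar_hcall_norets() is the variant used when the extra return words are ignored. A hedged sketch of that calling convention against a mocked hypervisor — mock_plpar_hcall is mine, and the H_PTEG_Full value is assumed for illustration (only H_ENTER's 0x08 opcode appears in this commit, via the removed H_ENTER_r3 macro):

#include <stdio.h>

#define H_Success	0
#define H_PTEG_Full	(-4)	/* value assumed for illustration */
#define H_ENTER		0x08	/* opcode from the removed H_ENTER_r3 macro */

/* Mock of the plpar_hcall() shape used in this commit: opcode plus four
 * inputs, up to three outputs returned through pointers. */
static long mock_plpar_hcall(unsigned long opcode,
			     unsigned long arg1, unsigned long arg2,
			     unsigned long arg3, unsigned long arg4,
			     unsigned long *out1, unsigned long *out2,
			     unsigned long *out3)
{
	if (opcode == H_ENTER) {
		*out1 = arg2 + 5;	/* pretend slot 5 of the group was used */
		*out2 = *out3 = 0;
		return H_Success;
	}
	return H_PTEG_Full;
}

int main(void)
{
	unsigned long slot, dummy0, dummy1;
	long lpar_rc;

	/* Same shape as the new pSeries_lpar_hpte_insert() call site */
	lpar_rc = mock_plpar_hcall(H_ENTER, 0, 0x1000,
				   0xdead0000, 0xbeef0000,
				   &slot, &dummy0, &dummy1);
	if (lpar_rc == H_PTEG_Full)
		return 1;
	printf("HPTE inserted at slot %lu\n", slot);
	return 0;
}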
arch/ppc64/kernel/stab.c

@@ -221,15 +221,18 @@ void make_slbe(unsigned long esid, unsigned long vsid, int large,
 }
 
 static inline void __ste_allocate(unsigned long esid, unsigned long vsid,
-				  int kernel_segment)
+				  int kernel_segment, mm_context_t context)
 {
 	if (cur_cpu_spec->cpu_features & CPU_FTR_SLB) {
+		int large = 0;
+
 #ifndef CONFIG_PPC_ISERIES
 		if (REGION_ID(esid << SID_SHIFT) == KERNEL_REGION_ID)
-			make_slbe(esid, vsid, 1, kernel_segment);
-		else
+			large = 1;
+		else if (REGION_ID(esid << SID_SHIFT) == USER_REGION_ID)
+			large = in_hugepage_area(context, esid << SID_SHIFT);
 #endif
-			make_slbe(esid, vsid, 0, kernel_segment);
+		make_slbe(esid, vsid, large, kernel_segment);
 	} else {
 		unsigned char top_entry, stab_entry, *segments;

@@ -255,6 +258,7 @@ int ste_allocate(unsigned long ea)
 {
 	unsigned long vsid, esid;
 	int kernel_segment = 0;
+	mm_context_t context;
 
 	PMC_SW_PROCESSOR(stab_faults);

@@ -266,16 +270,18 @@ int ste_allocate(unsigned long ea)
 	if (REGION_ID(ea) >= KERNEL_REGION_ID) {
 		kernel_segment = 1;
 		vsid = get_kernel_vsid(ea);
+		context = REGION_ID(ea);
 	} else {
-		struct mm_struct *mm = current->mm;
-		if (mm)
-			vsid = get_vsid(mm->context, ea);
-		else
+		if (!current->mm)
 			return 1;
+
+		context = current->mm->context;
+		vsid = get_vsid(context, ea);
 	}
 
 	esid = GET_ESID(ea);
-	__ste_allocate(esid, vsid, kernel_segment);
+	__ste_allocate(esid, vsid, kernel_segment, context);
 	if (!(cur_cpu_spec->cpu_features & CPU_FTR_SLB)) {
 		/* Order update */
 		asm volatile("sync":::"memory");

@@ -302,7 +308,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 		for (esid = 0; esid < 16; esid++) {
 			unsigned long ea = esid << SID_SHIFT;
 			vsid = get_vsid(mm->context, ea);
-			__ste_allocate(esid, vsid, 0);
+			__ste_allocate(esid, vsid, 0, mm->context);
 		}
 	} else {
 		unsigned long pc = KSTK_EIP(tsk);

@@ -316,7 +322,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 		    (REGION_ID(pc) >= KERNEL_REGION_ID))
 			return;
 		vsid = get_vsid(mm->context, pc);
-		__ste_allocate(GET_ESID(pc), vsid, 0);
+		__ste_allocate(GET_ESID(pc), vsid, 0, mm->context);
 	}
 
 	if (stack && (pc_segment != stack_segment)) {

@@ -324,7 +330,7 @@ static void preload_stab(struct task_struct *tsk, struct mm_struct *mm)
 		    (REGION_ID(stack) >= KERNEL_REGION_ID))
 			return;
 		vsid = get_vsid(mm->context, stack);
-		__ste_allocate(GET_ESID(stack), vsid, 0);
+		__ste_allocate(GET_ESID(stack), vsid, 0, mm->context);
 	}
 }
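__ste_allocate() now chooses the segment's page size per region: kernel segments always map large, user segments map large only when the address falls in a hugepage area for this context. A simplified sketch of that selection under the window constants from page.h below (the helper names are mine, and the CPU_FTR_16M_PAGE feature check from the real in_hugepage_area() is omitted):

#include <stdio.h>

#define CONTEXT_LOW_HPAGES	(1UL << 63)	/* from include/asm-ppc64/mmu.h */

enum region { USER_REGION, KERNEL_REGION };

/* Simplified stand-in for in_hugepage_area(): the fixed 1T-1.5T window,
 * plus the low 2G-3G window when the context has opted in. */
static int my_in_hugepage_area(unsigned long context, unsigned long addr)
{
	if (addr >= 0x0000010000000000UL && addr < 0x0000018000000000UL)
		return 1;
	if ((context & CONTEXT_LOW_HPAGES) &&
	    addr >= 0x80000000UL && addr < 0xc0000000UL)
		return 1;
	return 0;
}

/* Mirrors the new __ste_allocate() logic: kernel segments are large,
 * user segments are large only inside a hugepage area. */
static int segment_is_large(enum region r, unsigned long context,
			    unsigned long addr)
{
	if (r == KERNEL_REGION)
		return 1;
	return my_in_hugepage_area(context, addr);
}

int main(void)
{
	unsigned long ctx = CONTEXT_LOW_HPAGES;	/* 32-bit task that opted in */

	printf("%d\n", segment_is_large(USER_REGION, ctx, 0x90000000UL)); /* 1 */
	printf("%d\n", segment_is_large(USER_REGION, 0, 0x90000000UL));   /* 0 */
	return 0;
}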
arch/ppc64/kernel/xics.c

@@ -384,12 +384,12 @@ void xics_init_IRQ(void)
 	int i;
 	unsigned long intr_size = 0;
 	struct device_node *np;
-	uint *ireg, ilen, indx=0;
+	uint *ireg, ilen, indx = 0;
 	unsigned long intr_base = 0;
 	struct xics_interrupt_node {
-		unsigned long long addr;
-		unsigned long long size;
-	} inodes[NR_CPUS*2];
+		unsigned long addr;
+		unsigned long size;
+	} inodes[NR_CPUS];
 
 	ppc64_boot_msg(0x20, "XICS Init");
arch/ppc64/mm/Makefile

@@ -6,3 +6,4 @@ EXTRA_CFLAGS += -mno-minimal-toc
 obj-y := fault.o init.o extable.o imalloc.o
 obj-$(CONFIG_DISCONTIGMEM) += numa.o
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
arch/ppc64/mm/hugetlbpage.c (new file, 0 → 100644, +925 lines)

This diff is collapsed.
arch/ppc64/mm/init.c

@@ -290,7 +290,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 	if (!pgd_none(*pgd)) {
 		pmd = pmd_offset(pgd, vmaddr);
-		if (!pmd_none(*pmd)) {
+		if (pmd_present(*pmd)) {
 			ptep = pte_offset_kernel(pmd, vmaddr);
 			/* Check if HPTE might exist and flush it if so */
 			pte = __pte(pte_update(ptep, _PAGE_HPTEFLAGS, 0));

@@ -298,6 +298,7 @@ flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
 				flush_hash_page(context, vmaddr, pte, local);
 		}
+		WARN_ON(pmd_hugepage(*pmd));
 	}
 }

@@ -348,7 +349,7 @@ __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 		pmd_end = (start + PMD_SIZE) & PMD_MASK;
 		if (pmd_end > end)
 			pmd_end = end;
-		if (!pmd_none(*pmd)) {
+		if (pmd_present(*pmd)) {
 			ptep = pte_offset_kernel(pmd, start);
 			do {
 				if (pte_val(*ptep) & _PAGE_HASHPTE) {

@@ -367,6 +368,7 @@ __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
 				++ptep;
 			} while (start < pmd_end);
 		} else {
+			WARN_ON(pmd_hugepage(*pmd));
 			start = pmd_end;
 		}
 		++pmd;
include/asm-ppc64/hvcall.h

@@ -59,9 +59,6 @@
 #define H_XIRR			0x74
 #define H_PERFMON		0x7c
 
-#define HSC			".long 0x44000022\n"
-#define H_ENTER_r3		"li 3, 0x08\n"
-
 /* plpar_hcall() -- Generic call interface using above opcodes
  *
  * The actual call interface is a hypervisor call instruction with
include/asm-ppc64/mmu.h

@@ -18,6 +18,12 @@
 /* Default "unsigned long" context */
 typedef unsigned long mm_context_t;
 
+#ifdef CONFIG_HUGETLB_PAGE
+#define CONTEXT_LOW_HPAGES	(1UL<<63)
+#else
+#define CONTEXT_LOW_HPAGES	0
+#endif
+
 /*
  * Define the size of the cache used for segment table entries.  The first
  * entry is used as a cache pointer, therefore the actual number of entries
include/asm-ppc64/mmu_context.h

@@ -127,7 +127,8 @@ destroy_context(struct mm_struct *mm)
 #endif
 
 	mmu_context_queue.size++;
-	mmu_context_queue.elements[index] = mm->context;
+	mmu_context_queue.elements[index] =
+		mm->context & ~CONTEXT_LOW_HPAGES;
 
 	spin_unlock_irqrestore(&mmu_context_queue.lock, flags);
 }

@@ -189,6 +190,8 @@ get_vsid( unsigned long context, unsigned long ea )
 {
 	unsigned long ordinal, vsid;
 
+	context &= ~CONTEXT_LOW_HPAGES;
+
 	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
 	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
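CONTEXT_LOW_HPAGES lives in the top bit of the context value (per mmu.h above), so both get_vsid() and the context-queue recycling in destroy_context() mask it off; otherwise two contexts differing only in the hugepage flag would produce different VSIDs for the same context number. A sketch of that masking — LAST_USER_CONTEXT, VSID_RANDOMIZER, and VSID_MASK are not shown in this hunk, so the values below are illustrative placeholders only:

#include <stdio.h>

#define CONTEXT_LOW_HPAGES	(1UL << 63)	/* from include/asm-ppc64/mmu.h */

/* Illustrative placeholders: the real LAST_USER_CONTEXT, VSID_RANDOMIZER
 * and VSID_MASK are defined elsewhere in the ppc64 headers. */
#define LAST_USER_CONTEXT	0x8000UL
#define VSID_RANDOMIZER		0x9e3779b9UL
#define VSID_MASK		0xfffffffffUL

static unsigned long my_get_vsid(unsigned long context, unsigned long ea)
{
	unsigned long ordinal, vsid;

	/* Strip the hugepage flag so it never perturbs the VSID */
	context &= ~CONTEXT_LOW_HPAGES;

	ordinal = (((ea >> 28) & 0x1fffff) * LAST_USER_CONTEXT) | context;
	vsid = (ordinal * VSID_RANDOMIZER) & VSID_MASK;
	return vsid;
}

int main(void)
{
	unsigned long ea = 0x90000000UL, ctx = 42;

	/* Same VSID with and without the hugepage flag set */
	printf("%#lx\n", my_get_vsid(ctx, ea));
	printf("%#lx\n", my_get_vsid(ctx | CONTEXT_LOW_HPAGES, ea));
	return 0;
}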
include/asm-ppc64/page.h

@@ -22,6 +22,39 @@
 #define PAGE_MASK	(~(PAGE_SIZE-1))
 #define PAGE_OFFSET_MASK (PAGE_SIZE-1)
 
+#ifdef CONFIG_HUGETLB_PAGE
+
+#define HPAGE_SHIFT	24
+#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
+#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
+
+/* For 64-bit processes the hugepage range is 1T-1.5T */
+#define TASK_HPAGE_BASE		(0x0000010000000000UL)
+#define TASK_HPAGE_END		(0x0000018000000000UL)
+/* For 32-bit processes the hugepage range is 2-3G */
+#define TASK_HPAGE_BASE_32	(0x80000000UL)
+#define TASK_HPAGE_END_32	(0xc0000000UL)
+
+#define ARCH_HAS_HUGEPAGE_ONLY_RANGE
+#define is_hugepage_only_range(addr, len) \
+	( ((addr > (TASK_HPAGE_BASE-len)) && (addr < TASK_HPAGE_END)) || \
+	  ((current->mm->context & CONTEXT_LOW_HPAGES) && \
+	   (addr > (TASK_HPAGE_BASE_32-len)) && (addr < TASK_HPAGE_END_32)) )
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
+#define in_hugepage_area(context, addr) \
+	((cur_cpu_spec->cpu_features & CPU_FTR_16M_PAGE) && \
+	 ((((addr) >= TASK_HPAGE_BASE) && ((addr) < TASK_HPAGE_END)) || \
+	  (((context) & CONTEXT_LOW_HPAGES) && \
+	   (((addr) >= TASK_HPAGE_BASE_32) && ((addr) < TASK_HPAGE_END_32)))))
+
+#else /* !CONFIG_HUGETLB_PAGE */
+
+#define in_hugepage_area(mm, addr)	0
+
+#endif /* !CONFIG_HUGETLB_PAGE */
+
 #define SID_SHIFT           28
 #define SID_MASK            0xfffffffff
 #define GET_ESID(x)         (((x) >> SID_SHIFT) & SID_MASK)
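These constants pin hugepages at 16MB: HPAGE_SHIFT of 24 gives HPAGE_SIZE = 16MB, and with a 4K base page (PAGE_SHIFT of 12, assumed here since page.h does not show it in this hunk) HUGETLB_PAGE_ORDER comes out to 12, i.e. 4096 base pages per hugepage. A quick standalone check of that arithmetic:

#include <stdio.h>

#define PAGE_SHIFT	12	/* ppc64 4K base page, assumed for this sketch */
#define HPAGE_SHIFT	24	/* from this commit: 16M hugepages */
#define HPAGE_SIZE	((1UL) << HPAGE_SHIFT)
#define HPAGE_MASK	(~(HPAGE_SIZE - 1))
#define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)

int main(void)
{
	printf("hugepage size: %lu MB\n", HPAGE_SIZE >> 20);	/* 16 MB */
	printf("page order:    %d\n", HUGETLB_PAGE_ORDER);	/* 12 */
	printf("aligned addr:  %#lx\n", 0x123456789UL & HPAGE_MASK);
	return 0;
}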
include/asm-ppc64/pgtable.h

@@ -149,6 +149,25 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 /* shift to put page number into pte */
 #define PTE_SHIFT (16)
 
+/* We allow 2^41 bytes of real memory, so we need 29 bits in the PMD
+ * to give the PTE page number.  The bottom two bits are for flags. */
+#define PMD_TO_PTEPAGE_SHIFT (2)
+
+#ifdef CONFIG_HUGETLB_PAGE
+#define _PMD_HUGEPAGE	0x00000001U
+#define HUGEPTE_BATCH_SIZE	(1<<(HPAGE_SHIFT-PMD_SHIFT))
+
+int hash_huge_page(struct mm_struct *mm, unsigned long access,
+		   unsigned long ea, unsigned long vsid, int local);
+
+#define HAVE_ARCH_UNMAPPED_AREA
+#else
+
+#define hash_huge_page(mm,a,ea,vsid,local)	-1
+#define _PMD_HUGEPAGE	0
+
+#endif
+
 #ifndef __ASSEMBLY__
 
 /*

@@ -178,12 +197,16 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
 #define pte_pfn(x)		((unsigned long)((pte_val(x) >> PTE_SHIFT)))
 #define pte_page(x)		pfn_to_page(pte_pfn(x))
 
-#define pmd_set(pmdp, ptep)	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep)))
+#define pmd_set(pmdp, ptep) \
+	(pmd_val(*(pmdp)) = (__ba_to_bpn(ptep) << PMD_TO_PTEPAGE_SHIFT))
 #define pmd_none(pmd)		(!pmd_val(pmd))
-#define pmd_bad(pmd)		((pmd_val(pmd)) == 0)
-#define pmd_present(pmd)	((pmd_val(pmd)) != 0)
+#define pmd_hugepage(pmd)	(!!(pmd_val(pmd) & _PMD_HUGEPAGE))
+#define pmd_bad(pmd)		(((pmd_val(pmd)) == 0) || pmd_hugepage(pmd))
+#define pmd_present(pmd)	((!pmd_hugepage(pmd)) \
+				 && (pmd_val(pmd) & ~_PMD_HUGEPAGE) != 0)
 #define pmd_clear(pmdp)		(pmd_val(*(pmdp)) = 0)
-#define pmd_page_kernel(pmd)	(__bpn_to_ba(pmd_val(pmd)))
+#define pmd_page_kernel(pmd) \
+	(__bpn_to_ba(pmd_val(pmd) >> PMD_TO_PTEPAGE_SHIFT))
 #define pmd_page(pmd)		virt_to_page(pmd_page_kernel(pmd))
 #define pgd_set(pgdp, pmdp)	(pgd_val(*(pgdp)) = (__ba_to_bpn(pmdp)))
 #define pgd_none(pgd)		(!pgd_val(pgd))
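The PMD now stores the PTE-page number shifted left by PMD_TO_PTEPAGE_SHIFT, freeing the bottom two bits for flags; bit 0 (_PMD_HUGEPAGE) marks a hugepage PMD, and pmd_present() deliberately reports false for such entries so the normal page-table walkers skip them. A standalone round-trip of that encoding, with toy types and __ba_to_bpn/__bpn_to_ba replaced by illustrative shifts under an assumed 4K page size:

#include <assert.h>
#include <stdio.h>

#define PMD_TO_PTEPAGE_SHIFT	2	/* bottom two PMD bits are flags */
#define _PMD_HUGEPAGE		0x00000001U

typedef unsigned long pmd_t;

/* Toy stand-ins for __ba_to_bpn/__bpn_to_ba: address <-> page number,
 * assuming 4K pages (the real macros live elsewhere in the headers). */
#define MY_PAGE_SHIFT	12
#define ba_to_bpn(ba)	((unsigned long)(ba) >> MY_PAGE_SHIFT)
#define bpn_to_ba(bpn)	((unsigned long)(bpn) << MY_PAGE_SHIFT)

#define pmd_set(pmdp, ba)	(*(pmdp) = (ba_to_bpn(ba) << PMD_TO_PTEPAGE_SHIFT))
#define pmd_hugepage(pmd)	(!!((pmd) & _PMD_HUGEPAGE))
#define pmd_present(pmd)	((!pmd_hugepage(pmd)) && ((pmd) & ~_PMD_HUGEPAGE) != 0)
#define pmd_page_kernel(pmd)	(bpn_to_ba((pmd) >> PMD_TO_PTEPAGE_SHIFT))

int main(void)
{
	pmd_t pmd;

	pmd_set(&pmd, 0xc000000000123000UL);
	assert(pmd_present(pmd));
	assert(!pmd_hugepage(pmd));
	assert(pmd_page_kernel(pmd) == 0xc000000000123000UL); /* round-trip */

	pmd = _PMD_HUGEPAGE;		/* hugepage marker: not "present" */
	assert(pmd_hugepage(pmd) && !pmd_present(pmd));
	printf("PMD encoding round-trips\n");
	return 0;
}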