Commit 9498b6ad, authored Nov 08, 2002 by Paul Mackerras
PPC32: Make flush_icache_page a no-op, do the flush in update_mmu_cache.
Parent: d9129109
Showing 4 changed files with 63 additions and 51 deletions:
    arch/ppc/mm/init.c            +51  -15
    arch/ppc/mm/ppc_mmu.c          +0  -24
    include/asm-ppc/cacheflush.h   +1   -1
    include/asm-ppc/tlbflush.h    +11  -11
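The pattern running through all four files: stop flushing the i-cache eagerly from flush_icache_page() and do it lazily from update_mmu_cache(), using the arch-private PG_arch_1 page flag to track which pages are already coherent. The sketch below restates that protocol in isolation; PG_arch_1, __flush_dcache_icache() and the bitops are names taken from the patch, while the two wrapper functions are hypothetical and exist only for illustration:

    /*
     * PG_arch_1 set   => i-cache already coherent for this page.
     * PG_arch_1 clear => contents may have changed; flush before user
     *                    space can execute from the page.
     */

    /* Hypothetical helper: anything that writes fresh data into a page
     * (see clear_user_page/copy_user_page below) just clears the flag,
     * deferring the flush. */
    static void mark_icache_dirty(struct page *pg)
    {
            clear_bit(PG_arch_1, &pg->flags);
    }

    /* Hypothetical helper: at fault-completion time the flush runs at
     * most once per dirtied page, then the flag marks it clean again
     * (this is what update_mmu_cache() below does inline). */
    static void lazy_icache_flush(struct page *pg, void *kaddr)
    {
            if (!test_bit(PG_arch_1, &pg->flags)) {
                    __flush_dcache_icache(kaddr);
                    set_bit(PG_arch_1, &pg->flags);
            }
    }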
arch/ppc/mm/init.c
==================

@@ -472,11 +472,23 @@ void __init mem_init(void)

        if (agp_special_page)
                printk(KERN_INFO "AGP special page: 0x%08lx\n", agp_special_page);
#endif /* defined(CONFIG_ALL_PPC) */

#ifdef CONFIG_PPC_ISERIES
        create_virtual_bus_tce_table();
#endif /* CONFIG_PPC_ISERIES */

        /* Make sure all our pagetable pages have page->mapping
           and page->index set correctly. */
        for (addr = KERNELBASE; addr != 0; addr += PGDIR_SIZE) {
                struct page *pg;
                pmd_t *pmd = pmd_offset(pgd_offset_k(addr), addr);

                if (pmd_present(*pmd)) {
                        pg = pmd_page(*pmd);
                        pg->mapping = (void *) &init_mm;
                        pg->index = addr;
                }
        }
        mem_init_done = 1;
}
@@ -556,28 +568,17 @@ void flush_dcache_page(struct page *page)

        clear_bit(PG_arch_1, &page->flags);
}

void flush_icache_page(struct vm_area_struct *vma, struct page *page)
{
        unsigned long phys;

        if (page->mapping && !PageReserved(page) &&
            !test_bit(PG_arch_1, &page->flags)) {
                phys = page_to_pfn(page) << PAGE_SHIFT;
                __flush_dcache_icache_phys(phys);
                set_bit(PG_arch_1, &page->flags);
        }
}

void clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
        clear_page(page);
        clear_bit(PG_arch_1, &pg->flags);
}

void copy_user_page(void *vto, void *vfrom, unsigned long vaddr,
                    struct page *pg)
{
        copy_page(vto, vfrom);
        __flush_dcache_icache(vto);
        clear_bit(PG_arch_1, &pg->flags);
}

void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
@@ -589,3 +590,38 @@ void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,

        flush_icache_range(maddr, maddr + len);
        kunmap(page);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
        /* handle i-cache coherency */
        unsigned long pfn = pte_pfn(pte);

        if (pfn_valid(pfn)) {
                struct page *page = pfn_to_page(pfn);

                if (!PageReserved(page)
                    && !test_bit(PG_arch_1, &page->flags)) {
                        __flush_dcache_icache((void *) address);
                        set_bit(PG_arch_1, &page->flags);
                }
        }

#ifdef CONFIG_PPC_STD_MMU
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (Hash != 0 && pte_young(pte)) {
                struct mm_struct *mm;
                pmd_t *pmd;

                mm = (address < TASK_SIZE) ? vma->vm_mm : &init_mm;
                pmd = pmd_offset(pgd_offset(mm, address), address);
                if (!pmd_none(*pmd))
                        add_hash_page(mm->context, address, pmd_val(*pmd));
        }
#endif
}
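For context on when the new code runs: the generic fault handlers in mm/memory.c invoke update_mmu_cache() immediately after installing a PTE. A simplified sketch of that sequence, modeled on kernels of this era (flush_icache_page, mk_pte, set_pte and update_mmu_cache are the real interfaces; the function itself is an illustrative condensation, not actual mm/memory.c code):

    static void fault_finish_sketch(struct vm_area_struct *vma,
                                    unsigned long address,
                                    pte_t *page_table, struct page *new_page)
    {
            pte_t entry;

            /* On PPC32 this is now a no-op (see cacheflush.h below). */
            flush_icache_page(vma, new_page);

            entry = mk_pte(new_page, vma->vm_page_prot);
            set_pte(page_table, entry);

            /* Runs last, with the new PTE in place: on PPC32 it now
             * performs the deferred i-cache flush and, on hash-table
             * machines, preloads the HPTE. */
            update_mmu_cache(vma, address, entry);
    }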
arch/ppc/mm/ppc_mmu.c
=====================

@@ -280,27 +280,3 @@ void __init MMU_init_hw(void)

        if (ppc_md.progress)
                ppc_md.progress("hash:done", 0x205);
}

/*
 * This is called at the end of handling a user page fault, when the
 * fault has been handled by updating a PTE in the linux page tables.
 * We use it to preload an HPTE into the hash table corresponding to
 * the updated linux PTE.
 */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                      pte_t pte)
{
        struct mm_struct *mm;
        pmd_t *pmd;
        static int nopreload;

        if (Hash == 0 || nopreload)
                return;
        /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
        if (!pte_young(pte))
                return;
        mm = (address < TASK_SIZE) ? vma->vm_mm : &init_mm;
        pmd = pmd_offset(pgd_offset(mm, address), address);
        if (!pmd_none(*pmd))
                add_hash_page(mm->context, address, pmd_val(*pmd));
}
include/asm-ppc/cacheflush.h
============================

@@ -24,9 +24,9 @@

#define flush_cache_range(vma, a, b)    do { } while (0)
#define flush_cache_page(vma, p)        do { } while (0)
#define flush_page_to_ram(page)         do { } while (0)
#define flush_icache_page(vma, page)    do { } while (0)

extern void flush_dcache_page(struct page *page);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
                struct page *page, unsigned long addr, int len);
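One practical effect of swapping the extern declaration for the no-op #define: every generic call site of flush_icache_page() now disappears at preprocessing time, instead of costing a cross-file function call merely to test per-page state. A hypothetical caller, for illustration only:

    /* Against the old header this line was a real call into
     * arch/ppc/mm/init.c; against the new one it expands to
     * "do { } while (0)" and vanishes. */
    static inline void map_new_page_sketch(struct vm_area_struct *vma,
                                           struct page *pg)
    {
            flush_icache_page(vma, pg);
            /* ...the PTE is installed next, and update_mmu_cache()
             * handles the i-cache lazily... */
    }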
include/asm-ppc/tlbflush.h
==========================

@@ -30,7 +30,6 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        _tlbia();
}
#define update_mmu_cache(vma, addr, pte)        do { } while (0)

#elif defined(CONFIG_8xx)
#define __tlbia()       asm volatile ("tlbia; sync" : : : "memory")

@@ -46,7 +45,6 @@ static inline void flush_tlb_range(struct mm_struct *mm,

static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        __tlbia();
}
#define update_mmu_cache(vma, addr, pte)        do { } while (0)

#else   /* 6xx, 7xx, 7xxx cpus */
struct mm_struct;

@@ -56,15 +54,6 @@ extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr);

extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
                            unsigned long end);
extern void flush_tlb_kernel_range(unsigned long start, unsigned long end);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif

/*

@@ -77,5 +66,16 @@ static inline void flush_tlb_pgtables(struct mm_struct *mm,

{
}

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to ensure coherency between the i-cache and d-cache
 * for the page which has just been mapped in.
 * On machines which use an MMU hash table, we use this to put a
 * corresponding HPTE into the hash table ahead of time, instead of
 * waiting for the inevitable extra hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);

#endif /* _PPC_TLBFLUSH_H */
#endif /* __KERNEL__ */
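Since CONFIG_PPC_STD_MMU is not defined for 4xx and 8xx, the unified update_mmu_cache() that those families now pick up from this header reduces to the i-cache half of the version added to arch/ppc/mm/init.c above; previously the hook was a no-op #define for them. An illustrative expansion, i.e. the patch code with the #ifdef block compiled out:

    void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
                          pte_t pte)
    {
            /* handle i-cache coherency */
            unsigned long pfn = pte_pfn(pte);

            if (pfn_valid(pfn)) {
                    struct page *page = pfn_to_page(pfn);

                    if (!PageReserved(page)
                        && !test_bit(PG_arch_1, &page->flags)) {
                            __flush_dcache_icache((void *) address);
                            set_bit(PG_arch_1, &page->flags);
                    }
            }
    }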