Commit f25fcf8b, authored Apr 09, 2002 by Paul Mackerras
PPC update; create cacheflush.h and tlbflush.h and define default_idle
Parent: e9bcdadc
Showing 10 changed files with 234 additions and 187 deletions (+234 -187)
arch/ppc/kernel/idle.c           +27  -30
arch/ppc/kernel/semaphore.c       +1   -0
arch/ppc/mm/mmu_decl.h            +1   -0
arch/ppc/mm/pgtable.c            +65   -0
include/asm-ppc/cacheflush.h     +41   -0
include/asm-ppc/highmem.h         +1   -1
include/asm-ppc/pgalloc.h         +8  -67
include/asm-ppc/pgtable.h         +0  -89
include/asm-ppc/thread_info.h     +2   -0
include/asm-ppc/tlbflush.h       +88   -0
arch/ppc/kernel/idle.c

@@ -52,49 +52,45 @@ void power_save(void);
 unsigned long zero_paged_on;
 unsigned long powersave_nap;
 
-int idled(void)
+void default_idle(void)
 {
 	int do_power_save = 0;
 
 	if (cur_cpu_spec[smp_processor_id()]->cpu_features & CPU_FTR_CAN_DOZE)
 		do_power_save = 1;
 
-	/* endless loop with no priority at all */
-	for (;;) {
 #ifdef CONFIG_PPC_ISERIES
-		if (!current->need_resched) {
-			/* Turn off the run light */
-			run_light_on(0);
-			yield_shared_processor();
-		}
-		HMT_low();
+	if (!current->need_resched) {
+		/* Turn off the run light */
+		run_light_on(0);
+		yield_shared_processor();
+	}
+	HMT_low();
 #endif
 #ifdef CONFIG_SMP
-		if (!do_power_save) {
-			if (!need_resched()) {
-				set_thread_flag(TIF_POLLING_NRFLAG);
-				while (!test_thread_flag(TIF_NEED_RESCHED))
-					barrier();
-				clear_thread_flag(TIF_POLLING_NRFLAG);
-			}
-		}
+	if (!do_power_save) {
+		if (!need_resched()) {
+			set_thread_flag(TIF_POLLING_NRFLAG);
+			while (!test_thread_flag(TIF_NEED_RESCHED))
+				barrier();
+			clear_thread_flag(TIF_POLLING_NRFLAG);
+		}
+	}
 #endif
-		if (do_power_save && !need_resched())
-			power_save();
+	if (do_power_save && !need_resched())
+		power_save();
 
-		if (need_resched()) {
-			run_light_on(1);
-			schedule();
-		}
+	if (need_resched()) {
+		run_light_on(1);
+		schedule();
+	}
 #ifdef CONFIG_PPC_ISERIES
-		else {
-			run_light_on(0);
-			yield_shared_processor();
-			HMT_low();
-		}
-#endif /* CONFIG_PPC_ISERIES */
-	}
-	return 0;
+	else {
+		run_light_on(0);
+		yield_shared_processor();
+		HMT_low();
+	}
+#endif /* CONFIG_PPC_ISERIES */
 }
 
 /*
@@ -103,7 +99,8 @@ int idled(void)
  */
 int cpu_idle(void)
 {
-	idled();
+	for (;;)
+		default_idle();
 	return 0;
 }
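The CONFIG_SMP branch above is the standard polling-idle idiom: the CPU advertises TIF_POLLING_NRFLAG so a remote CPU setting TIF_NEED_RESCHED knows it can skip sending a reschedule IPI, then spins until the flag appears. A minimal sketch of just that idiom, lifted from the diff above (the name poll_until_resched is invented here for illustration):

static void poll_until_resched(void)
{
	/* Advertise that this CPU polls need_resched itself, so a
	 * remote CPU setting TIF_NEED_RESCHED can skip the IPI. */
	set_thread_flag(TIF_POLLING_NRFLAG);
	while (!test_thread_flag(TIF_NEED_RESCHED))
		barrier();		/* compiler barrier: force a re-read */
	clear_thread_flag(TIF_POLLING_NRFLAG);
}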
arch/ppc/kernel/semaphore.c

@@ -20,6 +20,7 @@
 #include <linux/sched.h>
 #include <asm/atomic.h>
 #include <asm/semaphore.h>
+#include <asm/errno.h>
 
 /*
  * Atomically update sem->count.
arch/ppc/mm/mmu_decl.h

@@ -22,6 +22,7 @@
  * 2 of the License, or (at your option) any later version.
  *
  */
+#include <asm/tlbflush.h>
 
 extern void mapin_ram(void);
 extern void bat_mapin_ram(void);
arch/ppc/mm/pgtable.c

@@ -28,6 +28,7 @@
 #include <linux/types.h>
 #include <linux/vmalloc.h>
 #include <linux/init.h>
+#include <linux/highmem.h>
 #include <asm/pgtable.h>
 #include <asm/pgalloc.h>

@@ -56,6 +57,70 @@ void setbat(int index, unsigned long virt, unsigned long phys,
 #define p_mapped_by_bats(x)	(0UL)
 #endif /* HAVE_BATS */
 
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+	pgd_t *ret;
+
+	if ((ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL)
+		clear_page(ret);
+	return ret;
+}
+
+void pgd_free(pgd_t *pgd)
+{
+	free_page((unsigned long)pgd);
+}
+
+pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
+{
+	pte_t *pte;
+	extern int mem_init_done;
+	extern void *early_get_page(void);
+	int timeout = 0;
+
+	if (mem_init_done) {
+		while ((pte = (pte_t *)__get_free_page(GFP_KERNEL)) == NULL
+		       && ++timeout < 10) {
+			set_current_state(TASK_UNINTERRUPTIBLE);
+			schedule_timeout(HZ);
+		}
+	} else
+		pte = (pte_t *)early_get_page();
+	if (pte != NULL)
+		clear_page(pte);
+	return pte;
+}
+
+struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
+{
+	struct page *pte;
+	int timeout = 0;
+#ifdef CONFIG_HIGHPTE
+	int flags = GFP_KERNEL | __GFP_HIGHMEM;
+#else
+	int flags = GFP_KERNEL;
+#endif
+
+	while ((pte = alloc_pages(flags, 0)) == NULL) {
+		if (++timeout >= 10)
+			return NULL;
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(HZ);
+	}
+	clear_highpage(pte);
+	return pte;
+}
+
+void pte_free_kernel(pte_t *pte)
+{
+	free_page((unsigned long)pte);
+}
+
+void pte_free(struct page *pte)
+{
+	__free_page(pte);
+}
+
 #ifndef CONFIG_PPC_ISERIES
 void *
 ioremap(unsigned long addr, unsigned long size)
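The two allocators above retry for up to ten seconds (ten schedule_timeout(HZ) sleeps) before giving up, so callers must still handle a NULL return. A hedged sketch of how a caller might consume pte_alloc_one() together with the pmd_populate() macro that pgalloc.h now declares; install_pte_page and its error convention are illustrative, not part of this commit:

static int install_pte_page(struct mm_struct *mm, pmd_t *pmd,
			    unsigned long address)
{
	struct page *new = pte_alloc_one(mm, address);

	if (new == NULL)
		return -ENOMEM;		/* allocator gave up after ~10s */
	pmd_populate(mm, pmd, new);	/* point the pmd at the new PTE page */
	return 0;
}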
include/asm-ppc/cacheflush.h (new file, mode 100644)

/*
 * BK Id: %F% %I% %G% %U% %#%
 */
/*
 * include/asm-ppc/cacheflush.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__
#ifndef _PPC_CACHEFLUSH_H
#define _PPC_CACHEFLUSH_H

#include <linux/mm.h>

/*
 * No cache flushing is required when address mappings are
 * changed, because the caches on PowerPCs are physically
 * addressed.  -- paulus
 * Also, when SMP we use the coherency (M) bit of the
 * BATs and PTEs.  -- Cort
 */
#define flush_cache_all()		do { } while (0)
#define flush_cache_mm(mm)		do { } while (0)
#define flush_cache_range(vma, a, b)	do { } while (0)
#define flush_cache_page(vma, p)	do { } while (0)
#define flush_page_to_ram(page)		do { } while (0)

extern void flush_dcache_page(struct page *page);
extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
extern void flush_icache_range(unsigned long, unsigned long);
extern void flush_icache_user_range(struct vm_area_struct *vma,
		struct page *page, unsigned long addr, int len);

extern void __flush_dcache_icache(void *page_va);
extern void __flush_dcache_icache_phys(unsigned long physaddr);

#endif /* _PPC_CACHEFLUSH_H */
#endif /* __KERNEL__ */
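Because PowerPC caches are physically addressed, the mapping-change hooks above compile away; the calls that do real work are the icache/dcache coherency routines, needed whenever the kernel stores instructions that a CPU will later execute. A hedged usage sketch (install_trap is hypothetical; flush_icache_range is the interface declared above):

static void install_trap(unsigned int *insn)
{
	*insn = 0x7fe00008;	/* PPC unconditional trap instruction */
	/* Push the store out of the dcache and invalidate the icache
	 * line before anything may execute the patched word. */
	flush_icache_range((unsigned long)insn,
			   (unsigned long)insn + sizeof(*insn));
}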
include/asm-ppc/highmem.h

@@ -28,7 +28,7 @@
 #include <linux/init.h>
 #include <linux/interrupt.h>
 #include <asm/kmap_types.h>
-#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
 
 /* undef for production */
 #define HIGHMEM_DEBUG 1
include/asm-ppc/pgalloc.h

@@ -7,24 +7,12 @@
 #include <linux/config.h>
 #include <linux/threads.h>
-#include <linux/highmem.h>
 #include <asm/processor.h>
 
 extern void __bad_pte(pmd_t *pmd);
 
-static inline pgd_t *pgd_alloc(struct mm_struct *mm)
-{
-	pgd_t *ret;
-
-	if ((ret = (pgd_t *)__get_free_page(GFP_KERNEL)) != NULL)
-		clear_page(ret);
-	return ret;
-}
-
-extern __inline__ void pgd_free(pgd_t *pgd)
-{
-	free_page((unsigned long)pgd);
-}
+extern pgd_t *pgd_alloc(struct mm_struct *mm);
+extern void pgd_free(pgd_t *pgd);
 
 /*
  * We don't have any real pmd's, and this code never triggers because

@@ -34,64 +22,17 @@ extern __inline__ void pgd_free(pgd_t *pgd)
 #define pmd_free(x)			do { } while (0)
 #define pgd_populate(mm, pmd, pte)	BUG()
 
-static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
-					  unsigned long address)
-{
-	pte_t *pte;
-	extern int mem_init_done;
-	extern void *early_get_page(void);
-	int timeout = 0;
-
-	if (mem_init_done) {
-		while ((pte = (pte_t *)__get_free_page(GFP_KERNEL)) == NULL
-		       && ++timeout < 10) {
-			set_current_state(TASK_UNINTERRUPTIBLE);
-			schedule_timeout(HZ);
-		}
-	} else
-		pte = (pte_t *)early_get_page();
-	if (pte != NULL)
-		clear_page(pte);
-	return pte;
-}
-
-static inline struct page *pte_alloc_one(struct mm_struct *mm,
-					 unsigned long address)
-{
-	struct page *pte;
-	int timeout = 0;
-#ifdef CONFIG_HIGHPTE
-	int flags = GFP_KERNEL | __GFP_HIGHMEM;
-#else
-	int flags = GFP_KERNEL;
-#endif
-
-	while ((pte = alloc_pages(flags, 0)) == NULL) {
-		if (++timeout >= 10)
-			return NULL;
-		set_current_state(TASK_UNINTERRUPTIBLE);
-		schedule_timeout(HZ);
-	}
-	clear_highpage(pte);
-	return pte;
-}
-
-static inline void pte_free_kernel(pte_t *pte)
-{
-	free_page((unsigned long)pte);
-}
-
-static inline void pte_free(struct page *pte)
-{
-	__free_page(pte);
-}
 
 #define pmd_populate_kernel(mm, pmd, pte)	\
 		(pmd_val(*(pmd)) = __pa(pte))
 #define pmd_populate(mm, pmd, pte)	\
 		(pmd_val(*(pmd)) = ((pte) - mem_map) << PAGE_SHIFT)
 
 extern int do_check_pgt_cache(int, int);
 
+extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
+extern struct page *pte_alloc_one(struct mm_struct *mm, unsigned long addr);
+extern void pte_free_kernel(pte_t *pte);
+extern void pte_free(struct page *pte);
+
 #define check_pgt_cache()	do { } while (0)
 
 #endif /* _PPC_PGALLOC_H */
 #endif /* __KERNEL__ */
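The two pmd_populate flavours above store the same physical address, derived two ways: __pa() works for kernel PTE pages, which always have a kernel virtual mapping, while user PTE pages are passed as a struct page because with CONFIG_HIGHPTE they may live in highmem with no permanent mapping. A hedged sketch making the arithmetic explicit (the wrapper function is illustrative only):

static void populate_both_ways(struct mm_struct *mm, pmd_t *kpmd, pmd_t *upmd,
			       pte_t *kpte, struct page *upte)
{
	/* Kernel PTE page: virtual-to-physical via __pa(). */
	pmd_populate_kernel(mm, kpmd, kpte);
	/* User PTE page: (upte - mem_map) is the pfn, so shifting by
	 * PAGE_SHIFT yields the physical address without needing a
	 * kernel mapping of the page. */
	pmd_populate(mm, upmd, upte);
}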
include/asm-ppc/pgtable.h

@@ -13,95 +13,6 @@
 #include <asm/processor.h>		/* For TASK_SIZE */
 #include <asm/mmu.h>
 #include <asm/page.h>
-#include <asm/kmap_types.h>
-
-extern void _tlbie(unsigned long address);
-extern void _tlbia(void);
-
-#if defined(CONFIG_4xx)
-#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void local_flush_tlb_all(void)
-	{ __tlbia(); }
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
-					unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void local_flush_tlb_range(struct mm_struct *mm,
-					 unsigned long start, unsigned long end)
-	{ __tlbia(); }
-#define update_mmu_cache(vma, addr, pte)	do { } while (0)
-
-#elif defined(CONFIG_8xx)
-#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")
-
-static inline void local_flush_tlb_all(void)
-	{ __tlbia(); }
-static inline void local_flush_tlb_mm(struct mm_struct *mm)
-	{ __tlbia(); }
-static inline void local_flush_tlb_page(struct vm_area_struct *vma,
-					unsigned long vmaddr)
-	{ _tlbie(vmaddr); }
-static inline void local_flush_tlb_range(struct mm_struct *mm,
-					 unsigned long start, unsigned long end)
-	{ __tlbia(); }
-#define update_mmu_cache(vma, addr, pte)	do { } while (0)
-
-#else	/* 6xx, 7xx, 7xxx cpus */
-struct mm_struct;
-struct vm_area_struct;
-extern void local_flush_tlb_all(void);
-extern void local_flush_tlb_mm(struct mm_struct *mm);
-extern void local_flush_tlb_page(struct vm_area_struct *vma,
-				 unsigned long vmaddr);
-extern void local_flush_tlb_range(struct vm_area_struct *vma,
-				  unsigned long start, unsigned long end);
-
-/*
- * This gets called at the end of handling a page fault, when
- * the kernel has put a new PTE into the page table for the process.
- * We use it to put a corresponding HPTE into the hash table
- * ahead of time, instead of waiting for the inevitable extra
- * hash-table miss exception.
- */
-extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
-#endif
-
-#define flush_tlb_all local_flush_tlb_all
-#define flush_tlb_mm local_flush_tlb_mm
-#define flush_tlb_page local_flush_tlb_page
-#define flush_tlb_range local_flush_tlb_range
-
-/*
- * This is called in munmap when we have freed up some page-table
- * pages.  We don't need to do anything here, there's nothing special
- * about our page-table pages.  -- paulus
- */
-static inline void flush_tlb_pgtables(struct mm_struct *mm,
-				      unsigned long start, unsigned long end)
-{
-}
-
-/*
- * No cache flushing is required when address mappings are
- * changed, because the caches on PowerPCs are physically
- * addressed.  -- paulus
- * Also, when SMP we use the coherency (M) bit of the
- * BATs and PTEs.  -- Cort
- */
-#define flush_cache_all()		do { } while (0)
-#define flush_cache_mm(mm)		do { } while (0)
-#define flush_cache_range(vma, a, b)	do { } while (0)
-#define flush_cache_page(vma, p)	do { } while (0)
-#define flush_page_to_ram(page)		do { } while (0)
-
-extern void flush_icache_user_range(struct vm_area_struct *vma,
-		struct page *page, unsigned long addr, int len);
-extern void flush_icache_range(unsigned long, unsigned long);
-extern void __flush_dcache_icache(void *page_va);
-extern void __flush_dcache_icache_phys(unsigned long physaddr);
-extern void flush_dcache_page(struct page *page);
-extern void flush_icache_page(struct vm_area_struct *vma, struct page *page);
-
 extern unsigned long va_to_phys(unsigned long address);
 extern pte_t *va_to_pte(unsigned long address);
include/asm-ppc/thread_info.h

@@ -68,6 +68,8 @@ static inline struct thread_info *current_thread_info(void)
 #define TI_FLAGS	8
 #define TI_CPU		12
 
+#define PREEMPT_ACTIVE		0x4000000
+
 /*
  * thread information flag bit numbers
  */
include/asm-ppc/tlbflush.h (new file, mode 100644)

/*
 * BK Id: %F% %I% %G% %U% %#%
 */
/*
 * include/asm-ppc/tlbflush.h
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#ifdef __KERNEL__
#ifndef _PPC_TLBFLUSH_H
#define _PPC_TLBFLUSH_H

#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>

extern void _tlbie(unsigned long address);
extern void _tlbia(void);

#if defined(CONFIG_4xx)
#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")

static inline void local_flush_tlb_all(void)
	{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
	{ _tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{ __tlbia(); }
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

#elif defined(CONFIG_8xx)
#define __tlbia()	asm volatile ("tlbia; sync" : : : "memory")

static inline void local_flush_tlb_all(void)
	{ __tlbia(); }
static inline void local_flush_tlb_mm(struct mm_struct *mm)
	{ __tlbia(); }
static inline void local_flush_tlb_page(struct vm_area_struct *vma,
					unsigned long vmaddr)
	{ _tlbie(vmaddr); }
static inline void local_flush_tlb_range(struct mm_struct *mm,
					 unsigned long start, unsigned long end)
	{ __tlbia(); }
#define update_mmu_cache(vma, addr, pte)	do { } while (0)

#else	/* 6xx, 7xx, 7xxx cpus */
struct mm_struct;
struct vm_area_struct;
extern void local_flush_tlb_all(void);
extern void local_flush_tlb_mm(struct mm_struct *mm);
extern void local_flush_tlb_page(struct vm_area_struct *vma,
				 unsigned long vmaddr);
extern void local_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);

/*
 * This gets called at the end of handling a page fault, when
 * the kernel has put a new PTE into the page table for the process.
 * We use it to put a corresponding HPTE into the hash table
 * ahead of time, instead of waiting for the inevitable extra
 * hash-table miss exception.
 */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
#endif

#define flush_tlb_all local_flush_tlb_all
#define flush_tlb_mm local_flush_tlb_mm
#define flush_tlb_page local_flush_tlb_page
#define flush_tlb_range local_flush_tlb_range

/*
 * This is called in munmap when we have freed up some page-table
 * pages.  We don't need to do anything here, there's nothing special
 * about our page-table pages.  -- paulus
 */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
				      unsigned long start, unsigned long end)
{
}

#endif /* _PPC_TLBFLUSH_H */
#endif /* __KERNEL__ */
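On 4xx and 8xx the local flush operations can simply invalidate the whole TLB inline with tlbia, while the classic 6xx/7xx/7xxx parts need out-of-line implementations that also evict hash-table entries. Whatever the variant, the calling convention is the same: change the PTE first, then flush the stale translation. A hedged sketch (make_page_readonly is hypothetical; set_pte and pte_wrprotect come from the ppc pgtable headers):

static void make_page_readonly(struct vm_area_struct *vma,
			       unsigned long addr, pte_t *ptep)
{
	set_pte(ptep, pte_wrprotect(*ptep));	/* clear the write permission */
	flush_tlb_page(vma, addr);		/* drop the cached translation */
}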