Commit f38e3c34
authored May 10, 2002 by Paul Mackerras
Define pfn_to_page, page_to_pfn, pte_pfn, pfn_pte for ppc32.
parent 4e416e98
Showing 2 changed files with 26 additions and 37 deletions (+26 -37):

    include/asm-ppc/page.h       +15 -15
    include/asm-ppc/pgtable.h    +11 -22
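Before the file diffs, a rough user-space model of what the new conversions do: a pfn (page frame number) is just the index of a physical page, and the macros introduced here move between kernel virtual addresses, pfns and struct page pointers by plain arithmetic on mem_map. The sketch below is illustrative only, not code from the patch; PAGE_SHIFT, PAGE_OFFSET and the toy mem_map are assumptions, and __pa() is simplified to the non-APUS case where PPC_MEMOFFSET is PAGE_OFFSET.

/* Illustrative model only: not the real kernel definitions. */
#include <stdio.h>

#define PAGE_SHIFT   12              /* assumed: 4K pages */
#define PAGE_OFFSET  0xc0000000UL    /* assumed kernel virtual base */
#define PPC_PGSTART  0               /* pfn of the first page in mem_map */

struct page { int dummy; };
static struct page mem_map[16];      /* toy stand-in for the real mem_map */

#define __pa(va)          ((unsigned long)(va) - PAGE_OFFSET)  /* non-APUS case */
#define pfn_to_page(pfn)  (mem_map + ((pfn) - PPC_PGSTART))
#define page_to_pfn(p)    ((unsigned long)((p) - mem_map) + PPC_PGSTART)
#define virt_to_page(va)  pfn_to_page(__pa(va) >> PAGE_SHIFT)

int main(void)
{
	unsigned long kaddr = PAGE_OFFSET + 3 * (1UL << PAGE_SHIFT);
	struct page *pg = virt_to_page(kaddr);

	/* virtual address -> pfn -> struct page, and back to the same pfn */
	printf("pfn = %lu, round-trip pfn = %lu\n",
	       __pa(kaddr) >> PAGE_SHIFT, page_to_pfn(pg));
	return 0;
}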
include/asm-ppc/page.h
@@ -90,13 +90,16 @@ extern void copy_page(void *to, void *from);
 extern void clear_user_page(void *page, unsigned long vaddr);
 extern void copy_user_page(void *to, void *from, unsigned long vaddr);
 
-extern unsigned long ppc_memstart;
-extern unsigned long ppc_memoffset;
 #ifndef CONFIG_APUS
 #define PPC_MEMSTART	0
+#define PPC_PGSTART	0
 #define PPC_MEMOFFSET	PAGE_OFFSET
 #else
+extern unsigned long ppc_memstart;
+extern unsigned long ppc_pgstart;
+extern unsigned long ppc_memoffset;
 #define PPC_MEMSTART	ppc_memstart
+#define PPC_PGSTART	ppc_pgstart
 #define PPC_MEMOFFSET	ppc_memoffset
 #endif
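PPC_PGSTART, new in this hunk, is the pfn of the first page that mem_map describes: 0 in the common case, and the runtime value ppc_pgstart under CONFIG_APUS, where RAM does not start at physical address 0. The sketch below, using a made-up start pfn of 0x300, only illustrates why the mem_map index is biased by PPC_PGSTART; the pfn_to_page()/page_to_pfn() definitions it exercises are the ones added in the next hunk.

/* Toy numbers only, not from the patch: pretend RAM starts at pfn 0x300,
 * so mem_map[0] must describe pfn 0x300, mem_map[1] pfn 0x301, and so on. */
#include <assert.h>

#define PPC_PGSTART 0x300UL

struct page { int dummy; };
static struct page mem_map[8];

#define pfn_to_page(pfn)  (mem_map + ((pfn) - PPC_PGSTART))
#define page_to_pfn(p)    ((unsigned long)((p) - mem_map) + PPC_PGSTART)

int main(void)
{
	/* The first real page of RAM maps to the first mem_map entry... */
	assert(pfn_to_page(0x300UL) == &mem_map[0]);
	/* ...and the conversion round-trips for any entry. */
	assert(page_to_pfn(&mem_map[5]) == 0x305UL);
	return 0;
}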
@@ -136,24 +139,21 @@ static inline void* ___va(unsigned long p)
 #define __pa(x) ___pa((unsigned long)(x))
 #define __va(x) ((void *)(___va((unsigned long)(x))))
 
-#define MAP_PAGE_RESERVED	(1<<15)
-#define virt_to_page(kaddr)	(mem_map + (((unsigned long)kaddr-PAGE_OFFSET) >> PAGE_SHIFT))
-#define VALID_PAGE(page)	((page - mem_map) < max_mapnr)
+#define pfn_to_page(pfn)	(mem_map + ((pfn) - PPC_PGSTART))
+#define page_to_pfn(page)	((unsigned long)((page) - mem_map) + PPC_PGSTART)
+#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
 
 extern unsigned long get_zero_page_fast(void);
 
+#define pfn_valid(pfn)		(((pfn) - PPC_PGSTART) < max_mapnr)
+#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
+
 /* Pure 2^n version of get_order */
 extern __inline__ int get_order(unsigned long size)
 {
-	int order;
-
-	size = (size-1) >> (PAGE_SHIFT-1);
-	order = -1;
-	do {
-		size >>= 1;
-		order++;
-	} while (size);
-	return order;
+	int lz;
+
+	size = (size-1) >> PAGE_SHIFT;
+	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
+	return 32 - lz;
 }
 
 #endif /* __ASSEMBLY__ */
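The rewritten get_order() above swaps the old shift-and-count loop for a single count-leading-zeros instruction: after computing (size-1) >> PAGE_SHIFT, the order is 32 minus the number of leading zero bits. The user-space check below is only a sketch of that arithmetic: PAGE_SHIFT = 12 is an assumption, and GCC's __builtin_clz stands in for the PowerPC cntlzw instruction, with the zero case handled explicitly because cntlzw of 0 yields 32 while __builtin_clz(0) is undefined in C. The replaced loop is kept alongside for comparison.

#include <assert.h>

#define PAGE_SHIFT 12                     /* assumed: 4K pages */

static int clz32(unsigned long x)         /* stand-in for cntlzw */
{
	return x ? __builtin_clz((unsigned int)x) : 32;
}

static int get_order_cntlzw(unsigned long size)  /* the new variant */
{
	size = (size - 1) >> PAGE_SHIFT;
	return 32 - clz32(size);
}

static int get_order_loop(unsigned long size)    /* the replaced shift loop */
{
	int order = -1;

	size = (size - 1) >> (PAGE_SHIFT - 1);
	do {
		size >>= 1;
		order++;
	} while (size);
	return order;
}

int main(void)
{
	unsigned long sz;

	/* Both variants agree: order n is the smallest n with 2^n pages >= size. */
	for (sz = 1; sz <= (1UL << 20); sz += 123)
		assert(get_order_cntlzw(sz) == get_order_loop(sz));
	return 0;
}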
include/asm-ppc/pgtable.h
@@ -148,7 +148,7 @@ extern unsigned long ioremap_bot, ioremap_base;
    is cleared in the TLB miss handler before the TLB entry is loaded.
  - All other bits of the PTE are loaded into TLBLO without
    modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for
-   software PTE bits.  We actually use use bits 20, 24, 25, 26, and
+   software PTE bits.  We actually use use bits 21, 24, 25, 26, and
    30 respectively for the software bits: ACCESSED, DIRTY, RW, EXEC,
    PRESENT.
 */
@@ -283,6 +283,16 @@ extern unsigned long ioremap_bot, ioremap_base;
 #define __S111	PAGE_SHARED_X
 
 #ifndef __ASSEMBLY__
+/*
+ * Conversions between PTE values and page frame numbers.
+ */
+#define pte_pfn(x)		(pte_val(x) >> PAGE_SHIFT)
+#define pte_page(x)		pfn_to_page(pte_pfn(x))
+
+#define pfn_pte(pfn, prot)	__pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))
+#define mk_pte(page, prot)	pfn_pte(page_to_pfn(page), prot)
+
 /*
  * ZERO_PAGE is a global shared page that is always zero: used
  * for zero-mapped memory areas etc..
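With these definitions, mk_pte(page, prot) is simply pfn_pte(page_to_pfn(page), prot) and pte_page(x) is pfn_to_page(pte_pfn(x)): every conversion is routed through the pfn. A minimal sketch of the pfn <-> PTE half of that composition follows; pte_t, pgprot_t and the protection value used here are simplified stand-ins, not the real ppc definitions.

/* Simplified model of the new PTE <-> pfn conversions. */
#include <stdio.h>

#define PAGE_SHIFT 12                          /* assumed: 4K pages */

typedef struct { unsigned long pte; } pte_t;
typedef struct { unsigned long pgprot; } pgprot_t;

#define pte_val(x)      ((x).pte)
#define pgprot_val(x)   ((x).pgprot)
#define __pte(x)        ((pte_t) { (x) })

#define pte_pfn(x)          (pte_val(x) >> PAGE_SHIFT)
#define pfn_pte(pfn, prot)  __pte(((pfn) << PAGE_SHIFT) | pgprot_val(prot))

int main(void)
{
	pgprot_t prot = { 0x7 };               /* made-up protection bits */
	pte_t pte = pfn_pte(0x1234UL, prot);   /* pfn + prot -> pte ... */

	printf("pte = %#lx, pfn back out = %#lx\n",
	       pte_val(pte), pte_pfn(pte));    /* ... and the pfn recovered */
	return 0;
}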
@@ -301,8 +311,6 @@ extern unsigned long empty_zero_page[1024];
 #define pmd_present(pmd)	(pmd_val(pmd) != 0)
 #define pmd_clear(pmdp)	do { pmd_val(*(pmdp)) = 0; } while (0)
 
-#define pte_page(x)		(mem_map+(unsigned long)((pte_val(x)-PPC_MEMSTART) >> PAGE_SHIFT))
-
 #ifndef __ASSEMBLY__
 /*
  * The "pgd_xxx()" functions here are trivial for a folded two-level
@@ -352,25 +360,6 @@ static inline pte_t pte_mkdirty(pte_t pte) {
 static inline pte_t pte_mkyoung(pte_t pte) {
 	pte_val(pte) |= _PAGE_ACCESSED; return pte; }
 
-/*
- * Conversion functions: convert a page and protection to a page entry,
- * and a page entry and page directory to the page they refer to.
- */
-
-static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
-{
-	pte_t pte;
-	pte_val(pte) = physpage | pgprot_val(pgprot);
-	return pte;
-}
-
-#define mk_pte(page,pgprot) \
-({								\
-	pte_t pte;						\
-	pte_val(pte) = (((page - mem_map) << PAGE_SHIFT) + PPC_MEMSTART) | pgprot_val(pgprot); \
-	pte;							\
-})
-
 static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
 {
 	pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot);