Commit 0b2b6e1d authored by Heiko Carstens's avatar Heiko Carstens Committed by Martin Schwidefsky

[S390] Remove open-coded mem_map usage.

Use page_to_phys and pfn_to_page to avoid open-coded mem_map usage.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
parent 7676bef9
...@@ -62,19 +62,21 @@ void show_mem(void) ...@@ -62,19 +62,21 @@ void show_mem(void)
{ {
int i, total = 0, reserved = 0; int i, total = 0, reserved = 0;
int shared = 0, cached = 0; int shared = 0, cached = 0;
struct page *page;
printk("Mem-info:\n"); printk("Mem-info:\n");
show_free_areas(); show_free_areas();
printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10)); printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
i = max_mapnr; i = max_mapnr;
while (i-- > 0) { while (i-- > 0) {
page = pfn_to_page(i);
total++; total++;
if (PageReserved(mem_map+i)) if (PageReserved(page))
reserved++; reserved++;
else if (PageSwapCache(mem_map+i)) else if (PageSwapCache(page))
cached++; cached++;
else if (page_count(mem_map+i)) else if (page_count(page))
shared += page_count(mem_map+i) - 1; shared += page_count(page) - 1;
} }
printk("%d pages of RAM\n",total); printk("%d pages of RAM\n",total);
printk("%d reserved pages\n",reserved); printk("%d reserved pages\n",reserved);
......
...@@ -45,11 +45,6 @@ static inline void * phys_to_virt(unsigned long address) ...@@ -45,11 +45,6 @@ static inline void * phys_to_virt(unsigned long address)
return __io_virt(address); return __io_virt(address);
} }
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags); extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
static inline void * ioremap (unsigned long offset, unsigned long size) static inline void * ioremap (unsigned long offset, unsigned long size)
......
...@@ -137,6 +137,7 @@ page_get_storage_key(unsigned long addr) ...@@ -137,6 +137,7 @@ page_get_storage_key(unsigned long addr)
#define __pa(x) (unsigned long)(x) #define __pa(x) (unsigned long)(x)
#define __va(x) (void *)(unsigned long)(x) #define __va(x) (void *)(unsigned long)(x)
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT) #define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define page_to_phys(page) (page_to_pfn(page) << PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr) #define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT) #define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
......
...@@ -116,7 +116,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte) ...@@ -116,7 +116,7 @@ pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
static inline void static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page) pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{ {
pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT)); pmd_populate_kernel(mm, pmd, (pte_t *)page_to_phys(page));
} }
/* /*
......
...@@ -599,7 +599,7 @@ ptep_establish(struct vm_area_struct *vma, ...@@ -599,7 +599,7 @@ ptep_establish(struct vm_area_struct *vma,
*/ */
static inline int page_test_and_clear_dirty(struct page *page) static inline int page_test_and_clear_dirty(struct page *page)
{ {
unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT); unsigned long physpage = page_to_phys(page);
int skey = page_get_storage_key(physpage); int skey = page_get_storage_key(physpage);
if (skey & _PAGE_CHANGED) if (skey & _PAGE_CHANGED)
...@@ -612,13 +612,13 @@ static inline int page_test_and_clear_dirty(struct page *page) ...@@ -612,13 +612,13 @@ static inline int page_test_and_clear_dirty(struct page *page)
*/ */
static inline int page_test_and_clear_young(struct page *page) static inline int page_test_and_clear_young(struct page *page)
{ {
unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT); unsigned long physpage = page_to_phys(page);
int ccode; int ccode;
asm volatile ( asm volatile(
"rrbe 0,%1\n" " rrbe 0,%1\n"
"ipm %0\n" " ipm %0\n"
"srl %0,28\n" " srl %0,28\n"
: "=d" (ccode) : "a" (physpage) : "cc" ); : "=d" (ccode) : "a" (physpage) : "cc" );
return ccode & 2; return ccode & 2;
} }
...@@ -636,7 +636,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot) ...@@ -636,7 +636,7 @@ static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
static inline pte_t mk_pte(struct page *page, pgprot_t pgprot) static inline pte_t mk_pte(struct page *page, pgprot_t pgprot)
{ {
unsigned long physpage = __pa((page - mem_map) << PAGE_SHIFT); unsigned long physpage = page_to_phys(page);
return mk_pte_phys(physpage, pgprot); return mk_pte_phys(physpage, pgprot);
} }
...@@ -664,11 +664,11 @@ static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot) ...@@ -664,11 +664,11 @@ static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t pgprot)
#define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK) #define pmd_page_vaddr(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT)) #define pmd_page(pmd) pfn_to_page(pmd_val(pmd) >> PAGE_SHIFT)
#define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK) #define pgd_page_vaddr(pgd) (pgd_val(pgd) & PAGE_MASK)
#define pgd_page(pgd) (mem_map+(pgd_val(pgd) >> PAGE_SHIFT)) #define pgd_page(pgd) pfn_to_page(pgd_val(pgd) >> PAGE_SHIFT)
/* to find an entry in a page-table-directory */ /* to find an entry in a page-table-directory */
#define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1)) #define pgd_index(address) (((address) >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment