Commit 0c295d0e authored by Christophe Leroy's avatar Christophe Leroy Committed by Michael Ellerman

powerpc/nohash: fix hash related comments in pgtable.h

Signed-off-by: Christophe Leroy <christophe.leroy@c-s.fr>
Signed-off-by: Michael Ellerman <mpe@ellerman.id.au>
parent 62b84265
...@@ -223,10 +223,6 @@ static inline unsigned long long pte_update(pte_t *p, ...@@ -223,10 +223,6 @@ static inline unsigned long long pte_update(pte_t *p,
} }
#endif /* CONFIG_PTE_64BIT */ #endif /* CONFIG_PTE_64BIT */
/*
* 2.6 calls this without flushing the TLB entry; this is wrong
* for our hash-based implementation, we fix that up here.
*/
#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG #define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep) static inline int __ptep_test_and_clear_young(unsigned int context, unsigned long addr, pte_t *ptep)
{ {
......
...@@ -3,7 +3,7 @@ ...@@ -3,7 +3,7 @@
#define _ASM_POWERPC_NOHASH_64_PGTABLE_H #define _ASM_POWERPC_NOHASH_64_PGTABLE_H
/* /*
* This file contains the functions and defines necessary to modify and use * This file contains the functions and defines necessary to modify and use
* the ppc64 hashed page table. * the ppc64 non-hashed page table.
*/ */
#include <asm/nohash/64/pgtable-4k.h> #include <asm/nohash/64/pgtable-4k.h>
...@@ -38,7 +38,7 @@ ...@@ -38,7 +38,7 @@
/* /*
* The vmalloc space starts at the beginning of that region, and * The vmalloc space starts at the beginning of that region, and
* occupies half of it on hash CPUs and a quarter of it on Book3E * occupies a quarter of it on Book3E
* (we keep a quarter for the virtual memmap) * (we keep a quarter for the virtual memmap)
*/ */
#define VMALLOC_START KERN_VIRT_START #define VMALLOC_START KERN_VIRT_START
...@@ -78,7 +78,7 @@ ...@@ -78,7 +78,7 @@
/* /*
* Defines the address of the vmemap area, in its own region on * Defines the address of the vmemap area, in its own region on
* hash table CPUs and after the vmalloc space on Book3E * after the vmalloc space on Book3E
*/ */
#define VMEMMAP_BASE VMALLOC_END #define VMEMMAP_BASE VMALLOC_END
#define VMEMMAP_END KERN_IO_START #define VMEMMAP_END KERN_IO_START
...@@ -248,14 +248,6 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, ...@@ -248,14 +248,6 @@ static inline void huge_ptep_set_wrprotect(struct mm_struct *mm,
pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); pte_update(mm, addr, ptep, _PAGE_RW, 0, 1);
} }
/*
* We currently remove entries from the hashtable regardless of whether
* the entry was young or dirty. The generic routines only flush if the
* entry was young or dirty which is not good enough.
*
* We should be more intelligent about this but for the moment we override
* these functions and force a tlb flush unconditionally
*/
#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH #define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
#define ptep_clear_flush_young(__vma, __address, __ptep) \ #define ptep_clear_flush_young(__vma, __address, __ptep) \
({ \ ({ \
...@@ -279,9 +271,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, ...@@ -279,9 +271,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr,
} }
/* Set the dirty and/or accessed bits atomically in a linux PTE, this /* Set the dirty and/or accessed bits atomically in a linux PTE */
* function doesn't need to flush the hash entry
*/
static inline void __ptep_set_access_flags(struct vm_area_struct *vma, static inline void __ptep_set_access_flags(struct vm_area_struct *vma,
pte_t *ptep, pte_t entry, pte_t *ptep, pte_t entry,
unsigned long address, unsigned long address,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment