Commit 4d942466 authored by Mel Gorman, committed by Linus Torvalds

mm: convert p[te|md]_mknonnuma and remaining page table manipulations

With PROT_NONE, the traditional page table manipulation functions are
sufficient.

[andre.przywara@arm.com: fix compiler warning in pmdp_invalidate()]
[akpm@linux-foundation.org: fix build with STRICT_MM_TYPECHECKS]
Signed-off-by: Mel Gorman <mgorman@suse.de>
Acked-by: Linus Torvalds <torvalds@linux-foundation.org>
Acked-by: Aneesh Kumar <aneesh.kumar@linux.vnet.ibm.com>
Tested-by: Sasha Levin <sasha.levin@oracle.com>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Cc: Dave Jones <davej@redhat.com>
Cc: Hugh Dickins <hughd@google.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Kirill Shutemov <kirill.shutemov@linux.intel.com>
Cc: Paul Mackerras <paulus@samba.org>
Cc: Rik van Riel <riel@redhat.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 842915f5
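
The sketch below is not part of the patch; it only illustrates the pattern the series converts to. Because a NUMA hinting entry is now just a PROT_NONE mapping, clearing the hint in the fault path becomes an ordinary protection change using the generic helpers, as in the do_numa_page() hunk further down. Variable names mirror that hunk.

    /* Fault-path fixup under the PROT_NONE scheme (mirrors do_numa_page()):
     * the hinting PTE is merely inaccessible, so restoring the VMA's normal
     * protections needs no pte_mknonnuma() helper. */
    pte = pte_modify(pte, vma->vm_page_prot);  /* make it present again */
    pte = pte_mkyoung(pte);                    /* the fault counts as a reference */
    set_pte_at(mm, addr, ptep, pte);
    update_mmu_cache(vma, addr, ptep);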
@@ -257,7 +257,10 @@ PMD_BIT_FUNC(mkyoung,   |= PMD_SECT_AF);
 #define mk_pmd(page,prot)	pfn_pmd(page_to_pfn(page),prot)
 
 /* represent a notpresent pmd by zero, this is used by pmdp_invalidate */
-#define pmd_mknotpresent(pmd)	(__pmd(0))
+static inline pmd_t pmd_mknotpresent(pmd_t pmd)
+{
+	return __pmd(0);
+}
 
 static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
 {
...
@@ -31,8 +31,7 @@ extern int move_huge_pmd(struct vm_area_struct *vma,
 			 unsigned long new_addr, unsigned long old_end,
 			 pmd_t *old_pmd, pmd_t *new_pmd);
 extern int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-			unsigned long addr, pgprot_t newprot,
-			int prot_numa);
+			unsigned long addr, pgprot_t newprot);
 
 enum transparent_hugepage_flag {
 	TRANSPARENT_HUGEPAGE_FLAG,
...
@@ -1355,9 +1355,8 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	goto out;
 clear_pmdnuma:
 	BUG_ON(!PageLocked(page));
-	pmd = pmd_mknonnuma(pmd);
+	pmd = pmd_modify(pmd, vma->vm_page_prot);
 	set_pmd_at(mm, haddr, pmdp, pmd);
-	VM_BUG_ON(pmd_protnone(*pmdp));
 	update_mmu_cache_pmd(vma, addr, pmdp);
 	unlock_page(page);
 out_unlock:
@@ -1472,7 +1471,7 @@ int move_huge_pmd(struct vm_area_struct *vma, struct vm_area_struct *new_vma,
  * - HPAGE_PMD_NR is protections changed and TLB flush necessary
  */
 int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
-		unsigned long addr, pgprot_t newprot, int prot_numa)
+		unsigned long addr, pgprot_t newprot)
 {
 	struct mm_struct *mm = vma->vm_mm;
 	spinlock_t *ptl;
@@ -1481,29 +1480,11 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 	if (__pmd_trans_huge_lock(pmd, vma, &ptl) == 1) {
 		pmd_t entry;
 		ret = 1;
-		if (!prot_numa) {
-			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
-			if (pmd_protnone(entry))
-				entry = pmd_mknonnuma(entry);
-			entry = pmd_modify(entry, newprot);
-			ret = HPAGE_PMD_NR;
-			set_pmd_at(mm, addr, pmd, entry);
-			BUG_ON(pmd_write(entry));
-		} else {
-			struct page *page = pmd_page(*pmd);
-
-			/*
-			 * Do not trap faults against the zero page. The
-			 * read-only data is likely to be read-cached on the
-			 * local CPU cache and it is less useful to know about
-			 * local vs remote hits on the zero page.
-			 */
-			if (!is_huge_zero_page(page) &&
-			    !pmd_protnone(*pmd)) {
-				pmdp_set_numa(mm, addr, pmd);
-				ret = HPAGE_PMD_NR;
-			}
-		}
+		entry = pmdp_get_and_clear_notify(mm, addr, pmd);
+		entry = pmd_modify(entry, newprot);
+		ret = HPAGE_PMD_NR;
+		set_pmd_at(mm, addr, pmd, entry);
+		BUG_ON(pmd_write(entry));
 		spin_unlock(ptl);
 	}
...
@@ -3018,9 +3018,9 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	 * validation through pte_unmap_same(). It's of NUMA type but
 	 * the pfn may be screwed if the read is non atomic.
 	 *
-	 * ptep_modify_prot_start is not called as this is clearing
-	 * the _PAGE_NUMA bit and it is not really expected that there
-	 * would be concurrent hardware modifications to the PTE.
+	 * We can safely just do a "set_pte_at()", because the old
+	 * page table entry is not accessible, so there would be no
+	 * concurrent hardware modifications to the PTE.
 	 */
 	ptl = pte_lockptr(mm, pmd);
 	spin_lock(ptl);
@@ -3029,7 +3029,9 @@ static int do_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		goto out;
 	}
 
-	pte = pte_mknonnuma(pte);
+	/* Make it present again */
+	pte = pte_modify(pte, vma->vm_page_prot);
+	pte = pte_mkyoung(pte);
 	set_pte_at(mm, addr, ptep, pte);
 	update_mmu_cache(vma, addr, ptep);
...
@@ -569,7 +569,7 @@ unsigned long change_prot_numa(struct vm_area_struct *vma,
 {
 	int nr_updated;
 
-	nr_updated = change_protection(vma, addr, end, vma->vm_page_prot, 0, 1);
+	nr_updated = change_protection(vma, addr, end, PAGE_NONE, 0, 1);
 	if (nr_updated)
 		count_vm_numa_events(NUMA_PTE_UPDATES, nr_updated);
...
@@ -1847,7 +1847,7 @@ int migrate_misplaced_transhuge_page(struct mm_struct *mm,
 out_dropref:
 	ptl = pmd_lock(mm, pmd);
 	if (pmd_same(*pmd, entry)) {
-		entry = pmd_mknonnuma(entry);
+		entry = pmd_modify(entry, vma->vm_page_prot);
 		set_pmd_at(mm, mmun_start, pmd, entry);
 		update_mmu_cache_pmd(vma, address, &entry);
 	}
...
@@ -142,7 +142,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
 				split_huge_page_pmd(vma, addr, pmd);
 			else {
 				int nr_ptes = change_huge_pmd(vma, pmd, addr,
-						newprot, prot_numa);
+						newprot);
 
 				if (nr_ptes) {
 					if (nr_ptes == HPAGE_PMD_NR) {
...
@@ -193,8 +193,6 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
 	pmd_t entry = *pmdp;
-	if (pmd_protnone(entry))
-		entry = pmd_mknonnuma(entry);
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
 }
...