Commit 2d42552d authored by Martin Schwidefsky

[S390] merge page_test_dirty and page_clear_dirty

The page_clear_dirty primitive always sets the default storage key
which resets the access control bits and the fetch protection bit.
That will surprise a KVM guest that sets non-zero access control
bits or the fetch protection bit. Merge page_test_dirty and
page_clear_dirty back to a single function and only clear the
dirty bit from the storage key.

In addition, move the functions page_test_and_clear_dirty and
page_test_and_clear_young to page.h where they belong. This
requires changing the parameter from a struct page * to a page
frame number.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent c26001d4
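For readers who don't live in arch/s390: every page frame of real storage carries a one-byte storage key whose layout matches the _PAGE_* defines this patch adds to page.h — access-control bits under 0xf0, fetch protection under 0x08, and the hardware referenced (0x04) and changed (0x02) bits. The sketch below is a plain user-space illustration of the behavioural fix, not kernel code; PAGE_DEFAULT_KEY is assumed to be 0, its usual value on Linux.

#include <stdio.h>

/* Storage-key bit layout, mirroring the _PAGE_* defines in page.h. */
#define _PAGE_CHANGED		0x02	/* HW changed (dirty) bit */
#define _PAGE_REFERENCED	0x04	/* HW referenced bit */
#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit */
#define _PAGE_ACC_BITS		0xf0	/* HW access control bits */

/* Assumed to be 0 for this demo, the usual Linux value. */
#define PAGE_DEFAULT_KEY	0x00

int main(void)
{
	/* A key as a KVM guest might leave it: ACC = 3, fetch protection
	 * on, and the hardware has marked the frame referenced + changed. */
	unsigned char skey = 0x30 | _PAGE_FP_BIT | _PAGE_REFERENCED | _PAGE_CHANGED;

	/* Old page_clear_dirty(): write the default key, losing the
	 * guest's ACC and FP bits along with the dirty bit. */
	unsigned char old_way = PAGE_DEFAULT_KEY;

	/* Merged page_test_and_clear_dirty(): clear only the changed bit. */
	unsigned char new_way = skey & ~_PAGE_CHANGED;

	printf("before   0x%02x\n", skey);	/* 0x3e */
	printf("old way  0x%02x\n", old_way);	/* 0x00 - guest state clobbered */
	printf("new way  0x%02x\n", new_way);	/* 0x3c - ACC/FP/referenced kept */
	return 0;
}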
--- a/arch/s390/include/asm/page.h
+++ b/arch/s390/include/asm/page.h
@@ -107,8 +107,8 @@ typedef pte_t *pgtable_t;
 #define __pgd(x)	((pgd_t) { (x) } )
 #define __pgprot(x)	((pgprot_t) { (x) } )
 
-static inline void
-page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
+static inline void page_set_storage_key(unsigned long addr,
+					unsigned char skey, int mapped)
 {
 	if (!mapped)
 		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
@@ -117,15 +117,59 @@ page_set_storage_key(unsigned long addr, unsigned int skey, int mapped)
 		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
 }
 
-static inline unsigned int
-page_get_storage_key(unsigned long addr)
+static inline unsigned char page_get_storage_key(unsigned long addr)
 {
-	unsigned int skey;
+	unsigned char skey;
 
-	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr), "0" (0));
+	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
 	return skey;
 }
 
+static inline int page_reset_referenced(unsigned long addr)
+{
+	unsigned int ipm;
+
+	asm volatile(
+		"	rrbe	0,%1\n"
+		"	ipm	%0\n"
+		: "=d" (ipm) : "a" (addr) : "cc");
+	return !!(ipm & 0x20000000);
+}
+
+/* Bits int the storage key */
+#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
+#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
+#define _PAGE_FP_BIT		0x08	/* HW fetch protection bit	*/
+#define _PAGE_ACC_BITS		0xf0	/* HW access control bits	*/
+
+/*
+ * Test and clear dirty bit in storage key.
+ * We can't clear the changed bit atomically. This is a potential
+ * race against modification of the referenced bit. This function
+ * should therefore only be called if it is not mapped in any
+ * address space.
+ */
+#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+static inline int page_test_and_clear_dirty(unsigned long pfn, int mapped)
+{
+	unsigned char skey;
+
+	skey = page_get_storage_key(pfn << PAGE_SHIFT);
+	if (!(skey & _PAGE_CHANGED))
+		return 0;
+	page_set_storage_key(pfn << PAGE_SHIFT, skey & ~_PAGE_CHANGED, mapped);
+	return 1;
+}
+
+/*
+ * Test and clear referenced bit in storage key.
+ */
+#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
+static inline int page_test_and_clear_young(unsigned long pfn)
+{
+	return page_reset_referenced(pfn << PAGE_SHIFT);
+}
+
 struct page;
 void arch_free_page(struct page *page, int order);
 void arch_alloc_page(struct page *page, int order);
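The new page_reset_referenced() helper replaces the open-coded asm removed from pgtable.h below. Both versions read the condition code set by rrbe (reset reference bit extended): CC2 or CC3 means the referenced bit was on. ipm deposits the CC in the register under mask 0x30000000, so the old helper shifted right by 28 and tested the 2-bit, while the new one tests 0x20000000 in place. A small user-space check that the two idioms agree, modelling only the bit arithmetic, not the instructions:

#include <assert.h>

/* Old extraction: "srl %0,28" then "return ccode & 2". */
static int old_style(unsigned int ipm) { return (ipm >> 28) & 2; }

/* New extraction: test the CC bit in place. */
static int new_style(unsigned int ipm) { return !!(ipm & 0x20000000); }

int main(void)
{
	unsigned int cc;

	for (cc = 0; cc < 4; cc++) {
		unsigned int ipm = cc << 28;	/* what "ipm %0" would deliver */
		assert(!!old_style(ipm) == new_style(ipm));
	}
	return 0;
}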
--- a/arch/s390/include/asm/pgtable.h
+++ b/arch/s390/include/asm/pgtable.h
@@ -373,10 +373,6 @@ extern unsigned long VMALLOC_START;
 #define _ASCE_USER_BITS		(_ASCE_SPACE_SWITCH | _ASCE_PRIVATE_SPACE | \
 				 _ASCE_ALT_EVENT)
 
-/* Bits int the storage key */
-#define _PAGE_CHANGED		0x02	/* HW changed bit		*/
-#define _PAGE_REFERENCED	0x04	/* HW referenced bit		*/
-
 /*
  * Page protection definitions.
  */
@@ -555,8 +551,6 @@ static inline void rcp_unlock(pte_t *ptep)
 #endif
 }
 
-/* forward declaration for SetPageUptodate in page-flags.h*/
-static inline void page_clear_dirty(struct page *page, int mapped);
 #include <linux/page-flags.h>
 
 static inline void ptep_rcp_copy(pte_t *ptep)
@@ -566,7 +560,7 @@ static inline void ptep_rcp_copy(pte_t *ptep)
 	unsigned int skey;
 	unsigned long *pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 
-	skey = page_get_storage_key(page_to_phys(page));
+	skey = page_get_storage_key(pte_val(*ptep) >> PAGE_SHIFT);
 	if (skey & _PAGE_CHANGED) {
 		set_bit_simple(RCP_GC_BIT, pgste);
 		set_bit_simple(KVM_UD_BIT, pgste);
@@ -760,6 +754,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 {
 	int dirty;
 	unsigned long *pgste;
+	unsigned long pfn;
 	struct page *page;
 	unsigned int skey;
 
@@ -767,8 +762,9 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 		return -EINVAL;
 	rcp_lock(ptep);
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
-	page = virt_to_page(pte_val(*ptep));
-	skey = page_get_storage_key(page_to_phys(page));
+	pfn = pte_val(*ptep) >> PAGE_SHIFT;
+	page = pfn_to_page(pfn);
+	skey = page_get_storage_key(pfn);
 	if (skey & _PAGE_CHANGED) {
 		set_bit_simple(RCP_GC_BIT, pgste);
 		set_bit_simple(KVM_UD_BIT, pgste);
@@ -779,7 +775,7 @@ static inline int kvm_s390_test_and_clear_page_dirty(struct mm_struct *mm,
 	}
 	dirty = test_and_clear_bit_simple(KVM_UD_BIT, pgste);
 	if (skey & _PAGE_CHANGED)
-		page_clear_dirty(page, 1);
+		page_set_storage_key(pfn, skey & ~_PAGE_CHANGED, 1);
 	rcp_unlock(ptep);
 	return dirty;
 }
@@ -790,16 +786,16 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 					    unsigned long addr, pte_t *ptep)
 {
 #ifdef CONFIG_PGSTE
-	unsigned long physpage;
+	unsigned long pfn;
 	int young;
 	unsigned long *pgste;
 
 	if (!vma->vm_mm->context.has_pgste)
 		return 0;
-	physpage = pte_val(*ptep) & PAGE_MASK;
+	pfn = pte_val(*ptep) >> PAGE_SHIFT;
 	pgste = (unsigned long *) (ptep + PTRS_PER_PTE);
 
-	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
+	young = ((page_get_storage_key(pfn) & _PAGE_REFERENCED) != 0);
 	rcp_lock(ptep);
 	if (young)
 		set_bit_simple(RCP_GR_BIT, pgste);
@@ -936,42 +932,6 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm,
 	__changed;							\
 })
 
-/*
- * Test and clear dirty bit in storage key.
- * We can't clear the changed bit atomically. This is a potential
- * race against modification of the referenced bit. This function
- * should therefore only be called if it is not mapped in any
- * address space.
- */
-#define __HAVE_ARCH_PAGE_TEST_DIRTY
-static inline int page_test_dirty(struct page *page)
-{
-	return (page_get_storage_key(page_to_phys(page)) & _PAGE_CHANGED) != 0;
-}
-
-#define __HAVE_ARCH_PAGE_CLEAR_DIRTY
-static inline void page_clear_dirty(struct page *page, int mapped)
-{
-	page_set_storage_key(page_to_phys(page), PAGE_DEFAULT_KEY, mapped);
-}
-
-/*
- * Test and clear referenced bit in storage key.
- */
-#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-static inline int page_test_and_clear_young(struct page *page)
-{
-	unsigned long physpage = page_to_phys(page);
-	int ccode;
-
-	asm volatile(
-		"	rrbe	0,%1\n"
-		"	ipm	%0\n"
-		"	srl	%0,28\n"
-		: "=d" (ccode) : "a" (physpage) : "cc" );
-	return ccode & 2;
-}
-
 /*
  * Conversion functions: convert a page and protection to a page entry,
  * and a page entry and page directory to the page they refer to.
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -184,22 +184,18 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
 #endif
 
-#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
-#define page_test_dirty(page)			(0)
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
+#define page_test_and_clear_dirty(pfn, mapped)	(0)
 #endif
 
-#ifndef __HAVE_ARCH_PAGE_CLEAR_DIRTY
-#define page_clear_dirty(page, mapped)		do { } while (0)
-#endif
-
-#ifndef __HAVE_ARCH_PAGE_TEST_DIRTY
+#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
 #define pte_maybe_dirty(pte)		pte_dirty(pte)
 #else
 #define pte_maybe_dirty(pte)		(1)
 #endif
 
 #ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_YOUNG
-#define page_test_and_clear_young(page) (0)
+#define page_test_and_clear_young(pfn) (0)
 #endif
 
 #ifndef __HAVE_ARCH_PGD_OFFSET_GATE
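These generic fallbacks follow the usual __HAVE_ARCH_ convention: an architecture that implements the primitive defines the guard macro (as s390 now does in page.h above) and the generic header stays out of the way; every other architecture gets a stub that evaluates to 0. pte_maybe_dirty() becomes a constant 1 in the arch case because dirtiness is then tracked in the storage key, so the pte's software dirty bit can't prove a page clean. A stand-alone sketch of the pattern; the function body returning 1 is a placeholder, not the s390 implementation:

#include <stdio.h>

/* Pretend to be an arch that supplies its own primitive; drop this
 * define to fall through to the generic stubs instead. */
#define __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY

#ifdef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
static int page_test_and_clear_dirty(unsigned long pfn, int mapped)
{
	(void)pfn; (void)mapped;
	return 1;			/* placeholder for the real key handling */
}
#endif

#ifndef __HAVE_ARCH_PAGE_TEST_AND_CLEAR_DIRTY
#define page_test_and_clear_dirty(pfn, mapped)	(0)
#define pte_maybe_dirty(pte)	pte_dirty(pte)	/* software bit is authoritative */
#else
#define pte_maybe_dirty(pte)	(1)	/* dirty state lives in the storage key */
#endif

int main(void)
{
	printf("dirty: %d, maybe dirty: %d\n",
	       page_test_and_clear_dirty(0UL, 1), pte_maybe_dirty(0));
	return 0;
}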
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -308,7 +308,7 @@ static inline void SetPageUptodate(struct page *page)
 {
 #ifdef CONFIG_S390
 	if (!test_and_set_bit(PG_uptodate, &page->flags))
-		page_clear_dirty(page, 0);
+		page_set_storage_key(page_to_pfn(page), PAGE_DEFAULT_KEY, 0);
 #else
 	/*
 	 * Memory barrier must be issued before setting the PG_uptodate bit,
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -719,7 +719,7 @@ int page_referenced(struct page *page,
 		unlock_page(page);
 	}
 out:
-	if (page_test_and_clear_young(page))
+	if (page_test_and_clear_young(page_to_pfn(page)))
 		referenced++;
 
 	return referenced;
@@ -785,12 +785,10 @@ int page_mkclean(struct page *page)
 		struct address_space *mapping = page_mapping(page);
 		if (mapping) {
 			ret = page_mkclean_file(mapping, page);
-			if (page_test_dirty(page)) {
-				page_clear_dirty(page, 1);
+			if (page_test_and_clear_dirty(page_to_pfn(page), 1))
 				ret = 1;
-			}
 		}
 	}
 
 	return ret;
 }
@@ -981,10 +979,9 @@ void page_remove_rmap(struct page *page)
 	 * not if it's in swapcache - there might be another pte slot
 	 * containing the swap entry, but page not yet written to swap.
	 */
-	if ((!PageAnon(page) || PageSwapCache(page)) && page_test_dirty(page)) {
-		page_clear_dirty(page, 1);
+	if ((!PageAnon(page) || PageSwapCache(page)) &&
+	    page_test_and_clear_dirty(page_to_pfn(page), 1))
 		set_page_dirty(page);
-	}
 	/*
 	 * Hugepages are not counted in NR_ANON_PAGES nor NR_FILE_MAPPED
 	 * and not charged by memcg for now.