Commit 41879ff6 authored by Heiko Carstens's avatar Heiko Carstens Committed by Martin Schwidefsky

s390/mm: use memset64 instead of clear_table

Use memset64 instead of the (now) open-coded variant clear_table.
Performance-wise there is no difference.
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
parent 0b77d670
...@@ -12,6 +12,7 @@ ...@@ -12,6 +12,7 @@
#define _S390_PGALLOC_H #define _S390_PGALLOC_H
#include <linux/threads.h> #include <linux/threads.h>
#include <linux/string.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/mm.h> #include <linux/mm.h>
...@@ -27,24 +28,9 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long); ...@@ -27,24 +28,9 @@ void page_table_free_rcu(struct mmu_gather *, unsigned long *, unsigned long);
void page_table_free_pgste(struct page *page); void page_table_free_pgste(struct page *page);
extern int page_table_allocate_pgste; extern int page_table_allocate_pgste;
/*
 * clear_table - fill a table region with a repeated unsigned long value.
 * @s:   start of the region to fill
 * @val: value to replicate into every unsigned long slot
 * @n:   size of the region in bytes (consumed in 256-byte steps)
 *
 * For each 256-byte chunk: the first long is stored explicitly, then the
 * s390 MVC instruction copies 248 bytes from offset 0 to offset 8 of the
 * same chunk.  Because source and destination overlap by 8 bytes, MVC's
 * left-to-right byte copy propagates the initial 8-byte pattern through
 * the rest of the chunk.
 *
 * NOTE(review): assumes @n is a multiple of 256 and @s points at a
 * suitably sized buffer — callers pass table sizes, not element counts.
 */
static inline void clear_table(unsigned long *s, unsigned long val, size_t n)
{
/* Dummy 256-byte type so the "+m" constraint tells the compiler the
 * asm reads/writes the whole chunk, not just *s. */
struct addrtype { char _[256]; };
int i;
for (i = 0; i < n; i += 256) {
/* Seed the first 8 bytes of the chunk with the pattern. */
*s = val;
asm volatile(
"mvc 8(248,%[s]),0(%[s])\n"
: "+m" (*(struct addrtype *) s)
: [s] "a" (s));
/* Advance by one 256-byte chunk (256/8 = 32 longs on s390x). */
s += 256 / sizeof(long);
}
}
static inline void crst_table_init(unsigned long *crst, unsigned long entry) static inline void crst_table_init(unsigned long *crst, unsigned long entry)
{ {
clear_table(crst, entry, _CRST_TABLE_SIZE); memset64((u64 *)crst, entry, _CRST_ENTRIES);
} }
static inline unsigned long pgd_entry_type(struct mm_struct *mm) static inline unsigned long pgd_entry_type(struct mm_struct *mm)
......
...@@ -166,10 +166,8 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore) ...@@ -166,10 +166,8 @@ int vdso_alloc_per_cpu(struct lowcore *lowcore)
vd->node_id = cpu_to_node(vd->cpu_nr); vd->node_id = cpu_to_node(vd->cpu_nr);
/* Set up access register mode page table */ /* Set up access register mode page table */
clear_table((unsigned long *) segment_table, _SEGMENT_ENTRY_EMPTY, memset64((u64 *)segment_table, _SEGMENT_ENTRY_EMPTY, _CRST_ENTRIES);
PAGE_SIZE << SEGMENT_ORDER); memset64((u64 *)page_table, _PAGE_INVALID, PTRS_PER_PTE);
clear_table((unsigned long *) page_table, _PAGE_INVALID,
256*sizeof(unsigned long));
*(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table; *(unsigned long *) segment_table = _SEGMENT_ENTRY + page_table;
*(unsigned long *) page_table = _PAGE_PROTECT + page_frame; *(unsigned long *) page_table = _PAGE_PROTECT + page_frame;
......
...@@ -158,13 +158,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) ...@@ -158,13 +158,13 @@ static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits)
struct page *page_table_alloc_pgste(struct mm_struct *mm) struct page *page_table_alloc_pgste(struct mm_struct *mm)
{ {
struct page *page; struct page *page;
unsigned long *table; u64 *table;
page = alloc_page(GFP_KERNEL); page = alloc_page(GFP_KERNEL);
if (page) { if (page) {
table = (unsigned long *) page_to_phys(page); table = (u64 *)page_to_phys(page);
clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); memset64(table, _PAGE_INVALID, PTRS_PER_PTE);
clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
} }
return page; return page;
} }
...@@ -221,12 +221,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm) ...@@ -221,12 +221,12 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
if (mm_alloc_pgste(mm)) { if (mm_alloc_pgste(mm)) {
/* Return 4K page table with PGSTEs */ /* Return 4K page table with PGSTEs */
atomic_set(&page->_mapcount, 3); atomic_set(&page->_mapcount, 3);
clear_table(table, _PAGE_INVALID, PAGE_SIZE/2); memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE);
clear_table(table + PTRS_PER_PTE, 0, PAGE_SIZE/2); memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE);
} else { } else {
/* Return the first 2K fragment of the page */ /* Return the first 2K fragment of the page */
atomic_set(&page->_mapcount, 1); atomic_set(&page->_mapcount, 1);
clear_table(table, _PAGE_INVALID, PAGE_SIZE); memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE);
spin_lock_bh(&mm->context.lock); spin_lock_bh(&mm->context.lock);
list_add(&page->lru, &mm->context.pgtable_list); list_add(&page->lru, &mm->context.pgtable_list);
spin_unlock_bh(&mm->context.lock); spin_unlock_bh(&mm->context.lock);
......
...@@ -59,7 +59,7 @@ pte_t __ref *vmem_pte_alloc(void) ...@@ -59,7 +59,7 @@ pte_t __ref *vmem_pte_alloc(void)
pte = (pte_t *) memblock_alloc(size, size); pte = (pte_t *) memblock_alloc(size, size);
if (!pte) if (!pte)
return NULL; return NULL;
clear_table((unsigned long *) pte, _PAGE_INVALID, size); memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE);
return pte; return pte;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment