Commit 0dfae7d5 authored by Paul Mundt

sh: Use the now generic SH-4 clear/copy page ops for all MMU platforms.

Now that the SH-4 page clear/copy ops are generic, they can be used for
all platforms with CONFIG_MMU=y. SH-5 remains the odd one out, but it too
will gradually be converted over to using this interface.

SH-3 platforms which do not contain aliases will see no impact from this
change, while aliasing SH-3 platforms will get the same interface as
SH-4.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 221c007b
...@@ -49,7 +49,6 @@ static inline void flush_kernel_dcache_page(struct page *page) ...@@ -49,7 +49,6 @@ static inline void flush_kernel_dcache_page(struct page *page)
flush_dcache_page(page); flush_dcache_page(page);
} }
#if (defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)) && !defined(CONFIG_CACHE_OFF)
extern void copy_to_user_page(struct vm_area_struct *vma, extern void copy_to_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src, struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len); unsigned long len);
...@@ -57,20 +56,6 @@ extern void copy_to_user_page(struct vm_area_struct *vma, ...@@ -57,20 +56,6 @@ extern void copy_to_user_page(struct vm_area_struct *vma,
extern void copy_from_user_page(struct vm_area_struct *vma, extern void copy_from_user_page(struct vm_area_struct *vma,
struct page *page, unsigned long vaddr, void *dst, const void *src, struct page *page, unsigned long vaddr, void *dst, const void *src,
unsigned long len); unsigned long len);
#else
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
flush_icache_user_range(vma, page, vaddr, len); \
} while (0)
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page));\
memcpy(dst, src, len); \
} while (0)
#endif
#define flush_cache_vmap(start, end) flush_cache_all() #define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all() #define flush_cache_vunmap(start, end) flush_cache_all()
......
...@@ -63,22 +63,23 @@ extern void copy_page(void *to, void *from); ...@@ -63,22 +63,23 @@ extern void copy_page(void *to, void *from);
struct page; struct page;
struct vm_area_struct; struct vm_area_struct;
#if !defined(CONFIG_CACHE_OFF) && defined(CONFIG_MMU) && \ #if defined(CONFIG_CPU_SH5)
(defined(CONFIG_CPU_SH5) || defined(CONFIG_CPU_SH4) || \
defined(CONFIG_SH7705_CACHE_32KB))
extern void clear_user_page(void *to, unsigned long address, struct page *page); extern void clear_user_page(void *to, unsigned long address, struct page *page);
extern void copy_user_page(void *to, void *from, unsigned long address, extern void copy_user_page(void *to, void *from, unsigned long address,
struct page *page); struct page *page);
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
#elif defined(CONFIG_MMU)
extern void copy_user_highpage(struct page *to, struct page *from, extern void copy_user_highpage(struct page *to, struct page *from,
unsigned long vaddr, struct vm_area_struct *vma); unsigned long vaddr, struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE #define __HAVE_ARCH_COPY_USER_HIGHPAGE
extern void clear_user_highpage(struct page *page, unsigned long vaddr); extern void clear_user_highpage(struct page *page, unsigned long vaddr);
#define clear_user_highpage clear_user_highpage #define clear_user_highpage clear_user_highpage
#endif
#else #else
#define clear_user_page(page, vaddr, pg) clear_page(page) #define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from) #define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#endif #endif
/* /*
......
...@@ -141,8 +141,7 @@ extern void paging_init(void); ...@@ -141,8 +141,7 @@ extern void paging_init(void);
extern void page_table_range_init(unsigned long start, unsigned long end, extern void page_table_range_init(unsigned long start, unsigned long end,
pgd_t *pgd); pgd_t *pgd);
#if !defined(CONFIG_CACHE_OFF) && (defined(CONFIG_CPU_SH4) || \ #if defined(CONFIG_MMU) && !defined(CONFIG_CPU_SH5)
defined(CONFIG_SH7705_CACHE_32KB)) && defined(CONFIG_MMU)
extern void kmap_coherent_init(void); extern void kmap_coherent_init(void);
#else #else
#define kmap_coherent_init() do { } while (0) #define kmap_coherent_init() do { } while (0)
......
...@@ -15,7 +15,7 @@ endif ...@@ -15,7 +15,7 @@ endif
obj-y += $(cache-y) obj-y += $(cache-y)
mmu-y := tlb-nommu.o pg-nommu.o mmu-y := tlb-nommu.o pg-nommu.o
mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o mmu-$(CONFIG_MMU) := fault_32.o tlbflush_32.o ioremap_32.o pg-mmu.o
obj-y += $(mmu-y) obj-y += $(mmu-y)
obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o obj-$(CONFIG_DEBUG_FS) += asids-debugfs.o
...@@ -29,10 +29,6 @@ tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o ...@@ -29,10 +29,6 @@ tlb-$(CONFIG_CPU_SH3) := tlb-sh3.o
tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o tlb-$(CONFIG_CPU_SH4) := tlb-sh4.o
tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o tlb-$(CONFIG_CPU_HAS_PTEAEX) := tlb-pteaex.o
obj-y += $(tlb-y) obj-y += $(tlb-y)
ifndef CONFIG_CACHE_OFF
obj-$(CONFIG_CPU_SH4) += pg-sh4.o
obj-$(CONFIG_SH7705_CACHE_32KB) += pg-sh4.o
endif
endif endif
obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
......
...@@ -831,4 +831,21 @@ void clear_user_page(void *to, unsigned long address, struct page *page) ...@@ -831,4 +831,21 @@ void clear_user_page(void *to, unsigned long address, struct page *page)
else else
sh64_clear_user_page_coloured(to, address); sh64_clear_user_page_coloured(to, address);
} }
/*
 * Copy kernel data into a page that is (or may be) mapped into user space,
 * keeping the caches coherent. This is the SH-5 fallback implementation
 * (added under the sh64 #ifdef; cf. the generic pg-mmu.c version elsewhere
 * in this commit) — NOTE(review): presumably used by ptrace/uaccess paths,
 * as is conventional for copy_to_user_page(); confirm against callers.
 *
 * @vma:   user VMA the page belongs to
 * @page:  target page
 * @vaddr: user virtual address of the page
 * @dst:   kernel-mapped destination inside the page
 * @src:   kernel source buffer
 * @len:   number of bytes to copy
 */
void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
/* Write back/invalidate the user mapping's cache lines first, so the
 * copy below is not shadowed by a stale aliased view of the page. */
flush_cache_page(vma, vaddr, page_to_pfn(page));
memcpy(dst, src, len);
/* The page may hold executable text (e.g. breakpoint insertion), so
 * bring the instruction cache back in sync with the new data. */
flush_icache_user_range(vma, page, vaddr, len);
}
/*
 * Copy data out of a page that is (or may be) mapped into user space,
 * coherently with respect to the user's cached view. SH-5 fallback
 * implementation, mirroring copy_to_user_page() above but with no
 * icache maintenance since nothing is written to the page.
 *
 * @vma:   user VMA the page belongs to
 * @page:  source page
 * @vaddr: user virtual address of the page
 * @dst:   kernel destination buffer
 * @src:   kernel-mapped source inside the page
 * @len:   number of bytes to copy
 */
void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src,
unsigned long len)
{
/* Flush the user mapping first so we read the user's current data,
 * not a stale line from a cache alias. */
flush_cache_page(vma, vaddr, page_to_pfn(page));
memcpy(dst, src, len);
}
#endif #endif
/* /*
* arch/sh/mm/pg-sh4.c * arch/sh/mm/pg-mmu.c
* *
* Copyright (C) 1999, 2000, 2002 Niibe Yutaka * Copyright (C) 1999, 2000, 2002 Niibe Yutaka
* Copyright (C) 2002 - 2009 Paul Mundt * Copyright (C) 2002 - 2009 Paul Mundt
...@@ -22,11 +22,13 @@ static pte_t *kmap_coherent_pte; ...@@ -22,11 +22,13 @@ static pte_t *kmap_coherent_pte;
void __init kmap_coherent_init(void) void __init kmap_coherent_init(void)
{ {
#if defined(CONFIG_CPU_SH4) || defined(CONFIG_SH7705_CACHE_32KB)
unsigned long vaddr; unsigned long vaddr;
/* cache the first coherent kmap pte */ /* cache the first coherent kmap pte */
vaddr = __fix_to_virt(FIX_CMAP_BEGIN); vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
kmap_coherent_pte = kmap_get_fixmap_pte(vaddr); kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
#endif
} }
static inline void *kmap_coherent(struct page *page, unsigned long addr) static inline void *kmap_coherent(struct page *page, unsigned long addr)
...@@ -62,13 +64,15 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page, ...@@ -62,13 +64,15 @@ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src, unsigned long vaddr, void *dst, const void *src,
unsigned long len) unsigned long len)
{ {
if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) { if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
!test_bit(PG_dcache_dirty, &page->flags)) {
void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); void *vto = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(vto, src, len); memcpy(vto, src, len);
kunmap_coherent(vto); kunmap_coherent(vto);
} else { } else {
memcpy(dst, src, len); memcpy(dst, src, len);
set_bit(PG_dcache_dirty, &page->flags); if (boot_cpu_data.dcache.n_aliases)
set_bit(PG_dcache_dirty, &page->flags);
} }
if (vma->vm_flags & VM_EXEC) if (vma->vm_flags & VM_EXEC)
...@@ -79,13 +83,15 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page, ...@@ -79,13 +83,15 @@ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
unsigned long vaddr, void *dst, const void *src, unsigned long vaddr, void *dst, const void *src,
unsigned long len) unsigned long len)
{ {
if (page_mapped(page) && !test_bit(PG_dcache_dirty, &page->flags)) { if (boot_cpu_data.dcache.n_aliases && page_mapped(page) &&
!test_bit(PG_dcache_dirty, &page->flags)) {
void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK); void *vfrom = kmap_coherent(page, vaddr) + (vaddr & ~PAGE_MASK);
memcpy(dst, vfrom, len); memcpy(dst, vfrom, len);
kunmap_coherent(vfrom); kunmap_coherent(vfrom);
} else { } else {
memcpy(dst, src, len); memcpy(dst, src, len);
set_bit(PG_dcache_dirty, &page->flags); if (boot_cpu_data.dcache.n_aliases)
set_bit(PG_dcache_dirty, &page->flags);
} }
} }
...@@ -96,7 +102,8 @@ void copy_user_highpage(struct page *to, struct page *from, ...@@ -96,7 +102,8 @@ void copy_user_highpage(struct page *to, struct page *from,
vto = kmap_atomic(to, KM_USER1); vto = kmap_atomic(to, KM_USER1);
if (page_mapped(from) && !test_bit(PG_dcache_dirty, &from->flags)) { if (boot_cpu_data.dcache.n_aliases && page_mapped(from) &&
!test_bit(PG_dcache_dirty, &from->flags)) {
vfrom = kmap_coherent(from, vaddr); vfrom = kmap_coherent(from, vaddr);
copy_page(vto, vfrom); copy_page(vto, vfrom);
kunmap_coherent(vfrom); kunmap_coherent(vfrom);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment