Commit 1c9d3df5 authored by Richard Purdie, committed by Russell King

[ARM] 4078/1: Fix ARM copypage cache coherency problems

If PG_dcache_dirty is set for a page, we need to flush the source page
before performing any copypage operation using a different virtual address.

This fixes the copypage implementations for XScale, StrongARM and ARMv6.

This patch fixes segmentation faults seen in the dynamic linker under
the usage patterns in glibc 2.4/2.5.
Signed-off-by: Richard Purdie <rpurdie@rpsys.net>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
parent b0b1d60a
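
For clarity, each of the three copypage variants patched below gains the same guard before it copies through an aliased mapping. A minimal sketch of that common pattern, using only the names that appear in the hunks below:

/*
 * If the source page has a deferred D-cache flush pending
 * (PG_dcache_dirty set), write back its kernel-mapping cache
 * lines now, so the copy performed through a different virtual
 * address sees up-to-date data.
 */
struct page *page = virt_to_page(kfrom);

if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
	__flush_dcache_page(page_mapping(page), page);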
--- a/arch/arm/mm/copypage-v4mc.c
+++ b/arch/arm/mm/copypage-v4mc.c
@@ -19,6 +19,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #include "mm.h"
@@ -69,6 +70,11 @@ mc_copy_user_page(void *from, void *to)
 void v4_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
+
 	spin_lock(&minicache_lock);
 	set_pte_ext(TOP_PTE(0xffff8000), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
......
--- a/arch/arm/mm/copypage-v6.c
+++ b/arch/arm/mm/copypage-v6.c
@@ -53,6 +53,10 @@ static void v6_copy_user_page_aliasing(void *kto, const void *kfrom, unsigned lo
 {
 	unsigned int offset = CACHE_COLOUR(vaddr);
 	unsigned long from, to;
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
 
 	/*
 	 * Discard data in the kernel mapping for the new page.
......
--- a/arch/arm/mm/copypage-xscale.c
+++ b/arch/arm/mm/copypage-xscale.c
@@ -19,6 +19,7 @@
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
 
 #include "mm.h"
@@ -91,6 +92,11 @@ mc_copy_user_page(void *from, void *to)
 void xscale_mc_copy_user_page(void *kto, const void *kfrom, unsigned long vaddr)
 {
+	struct page *page = virt_to_page(kfrom);
+
+	if (test_and_clear_bit(PG_dcache_dirty, &page->flags))
+		__flush_dcache_page(page_mapping(page), page);
+
 	spin_lock(&minicache_lock);
 	set_pte_ext(TOP_PTE(COPYPAGE_MINICACHE), pfn_pte(__pa(kfrom) >> PAGE_SHIFT, minicache_pgprot), 0);
......
--- a/include/asm-arm/cacheflush.h
+++ b/include/asm-arm/cacheflush.h
@@ -355,6 +355,8 @@ extern void flush_ptrace_access(struct vm_area_struct *vma, struct page *page,
  */
 extern void flush_dcache_page(struct page *);
 
+extern void __flush_dcache_page(struct address_space *mapping, struct page *page);
+
 #define flush_dcache_mmap_lock(mapping) \
 	write_lock_irq(&(mapping)->tree_lock)
 #define flush_dcache_mmap_unlock(mapping) \
......