Commit 03973c57 authored by Max Filippov, committed by Greg Kroah-Hartman

xtensa: fix cache aliasing handling code for WT cache

commit 6d0f581d upstream.

Currently, building a kernel for an xtensa core with an aliasing WT cache
fails with the following messages:

  mm/memory.c:2152: undefined reference to `flush_dcache_page'
  mm/memory.c:2332: undefined reference to `local_flush_cache_page'
  mm/memory.c:1919: undefined reference to `local_flush_cache_range'
  mm/memory.c:4179: undefined reference to `copy_to_user_page'
  mm/memory.c:4183: undefined reference to `copy_from_user_page'

This happens because the implementation of these functions is only
compiled when the data cache is WB, which is wrong: even when the data
cache doesn't need flushing it still needs invalidation. Functions like
__flush_[invalidate_]dcache_* are correctly defined for both WB and WT
caches (and even if they weren't, that would still be OK, just slower).
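
For illustration (a simplified sketch of the usual WB/WT split in
arch/xtensa/include/asm/cacheflush.h, not the literal header contents),
a WT configuration can map the flush+invalidate helpers onto plain
invalidation:

  #if XCHAL_DCACHE_IS_WRITEBACK
  /* WB cache: dirty lines must be written back before invalidation */
  void __flush_invalidate_dcache_range(unsigned long va, unsigned long size);
  #else
  /* WT cache: nothing is ever dirty, so invalidation alone is enough */
  # define __flush_invalidate_dcache_range(p, s) __invalidate_dcache_range(p, s)
  #endif

so the aliasing handling code can call the same helpers regardless of the
cache write policy.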

Fix this by providing the same implementation of the above functions for
both WB and WT caches.
Signed-off-by: Max Filippov <jcmvbkbc@gmail.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 0041042d
@@ -120,10 +120,6 @@ void copy_user_highpage(struct page *dst, struct page *src,
 	preempt_enable();
 }
 
-#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
-
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
-
 /*
  * Any time the kernel writes to a user page cache page, or it is about to
  * read from a page cache page this routine is called.
@@ -208,7 +204,7 @@ void local_flush_cache_page(struct vm_area_struct *vma, unsigned long address,
 	__invalidate_icache_page_alias(virt, phys);
 }
 
-#endif
+#endif /* DCACHE_WAY_SIZE > PAGE_SIZE */
 
 void
 update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
@@ -225,7 +221,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
 
 	flush_tlb_page(vma, addr);
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 	if (!PageReserved(page) && test_bit(PG_arch_1, &page->flags)) {
 		unsigned long phys = page_to_phys(page);
@@ -256,7 +252,7 @@ update_mmu_cache(struct vm_area_struct * vma, unsigned long addr, pte_t *ptep)
  * flush_dcache_page() on the page.
  */
 
-#if (DCACHE_WAY_SIZE > PAGE_SIZE) && XCHAL_DCACHE_IS_WRITEBACK
+#if (DCACHE_WAY_SIZE > PAGE_SIZE)
 
 void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
 		unsigned long vaddr, void *dst, const void *src,