Commit a753499d authored by Michal Simek's avatar Michal Simek

microblaze: mm: Flush TLB to ensure correct mapping when highmem is ON

The MMU contains a stale mapping that was never flushed, and a new mapping
reuses the same virtual addresses as the previous one. As a result, no TLB
miss occurs to load the new, correct TLB entry, and the MMU keeps pointing
to the wrong area.

This is reproducible when large files (256 MB and more) are copied and
checked.
Signed-off-by: default avatarMichal Simek <michal.simek@xilinx.com>
parent 83f0124a
...@@ -60,6 +60,7 @@ void __kunmap_atomic(void *kvaddr) ...@@ -60,6 +60,7 @@ void __kunmap_atomic(void *kvaddr)
{ {
unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK; unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
int type; int type;
unsigned int idx;
if (vaddr < __fix_to_virt(FIX_KMAP_END)) { if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
pagefault_enable(); pagefault_enable();
...@@ -68,21 +69,18 @@ void __kunmap_atomic(void *kvaddr) ...@@ -68,21 +69,18 @@ void __kunmap_atomic(void *kvaddr)
} }
type = kmap_atomic_idx(); type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
{
unsigned int idx;
idx = type + KM_TYPE_NR * smp_processor_id(); idx = type + KM_TYPE_NR * smp_processor_id();
#ifdef CONFIG_DEBUG_HIGHMEM
BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx)); BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
/* /*
* force other mappings to Oops if they'll try to access * force other mappings to Oops if they'll try to access
* this pte without first remap it * this pte without first remap it
*/ */
pte_clear(&init_mm, vaddr, kmap_pte-idx); pte_clear(&init_mm, vaddr, kmap_pte-idx);
local_flush_tlb_page(NULL, vaddr); local_flush_tlb_page(NULL, vaddr);
}
#endif
kmap_atomic_idx_pop(); kmap_atomic_idx_pop();
pagefault_enable(); pagefault_enable();
preempt_enable(); preempt_enable();
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment