Commit 0b9c1280 authored by David S. Miller

[SPARC64]: Two io_remap_page_range() fixes.

1) BUG on the presence of existing mappings, just as
   remap_pfn_range() does.
2) Perform the TLB flush while holding page_table_lock.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 9fbd4851
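
For readers skimming the hunks below, here is a condensed C sketch of the code the patch leaves behind, with explanatory comments added. It is an illustration only: it reuses just the identifiers visible in the diff (pte_none(), set_pte(), BUG_ON(), flush_tlb_range(), spin_unlock(), mm->page_table_lock), elides the enclosing function bodies, and is not compilable on its own.

/* Fix 1: refuse to overwrite a live mapping. Instead of saving the
 * old PTE, clearing it, and warning afterwards via forget_pte(), the
 * loop now BUGs immediately if a mapping is already present -- the
 * same policy remap_pfn_range() applies.
 */
do {
        BUG_ON(!pte_none(*pte));        /* an existing mapping is a bug */
        set_pte(pte, entry);
        address += PAGE_SIZE;
        pte++;
} while (address < curend);

/* Fix 2: flush the TLB for the remapped range *before* releasing
 * mm->page_table_lock, rather than after, so the flush happens while
 * the page tables for this range are still protected by the lock.
 */
flush_tlb_range(vma, beg, end);
spin_unlock(&mm->page_table_lock);
return error;
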
@@ -15,14 +15,6 @@
 #include <asm/page.h>
 #include <asm/tlbflush.h>
 
-static inline void forget_pte(pte_t page)
-{
-        if (!pte_none(page)) {
-                printk("forget_pte: old mapping existed!\n");
-                BUG();
-        }
-}
-
 /* Remap IO memory, the same way as remap_pfn_range(), but use
  * the obio memory space.
  *
@@ -43,7 +35,6 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign
         if (end > PMD_SIZE)
                 end = PMD_SIZE;
         do {
-                pte_t oldpage;
                 pte_t entry;
                 unsigned long curend = address + PAGE_SIZE;
 
@@ -75,10 +66,8 @@ static inline void io_remap_pte_range(pte_t * pte, unsigned long address, unsign
                 if (offset & 0x1UL)
                         pte_val(entry) &= ~(_PAGE_E);
                 do {
-                        oldpage = *pte;
-                        pte_clear(pte);
+                        BUG_ON(!pte_none(*pte));
                         set_pte(pte, entry);
-                        forget_pte(oldpage);
                         address += PAGE_SIZE;
                         pte++;
                 } while (address < curend);
@@ -132,8 +121,8 @@ int io_remap_page_range(struct vm_area_struct *vma, unsigned long from, unsigned
                 from = (from + PGDIR_SIZE) & PGDIR_MASK;
                 dir++;
         }
+        flush_tlb_range(vma, beg, end);
         spin_unlock(&mm->page_table_lock);
-        flush_tlb_range(vma, beg, end);
         return error;
 }