Commit f8e8784a authored by Andrew Morton, committed by Linus Torvalds

[PATCH] s390: tlb flush race.

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

I think I found a potential race in install_page/install_file_pte. The
inline function zap_pte releases pages by calling page_remove_rmap and
page_cache_release. If this was the last user of a page, it can get
purged from the page cache and then immediately reused, while another
CPU may still hold a stale TLB entry for it. The TLB entry is flushed
in the callers of zap_pte, install_page and install_file_pte, but by
then it is too late. I admit it's a very unlikely race, but nevertheless...
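
To make the window concrete, the problematic interleaving looks roughly
like this (the CPU labels are mine, not part of the patch):

	CPU 0 (install_page)                 CPU 1 (another thread, same mm)
	--------------------------------     --------------------------------
	zap_pte()
	  pte = ptep_get_and_clear(ptep)
	  page_remove_rmap(page, ptep)
	  page_cache_release(page)           page is freed and immediately
	                                     reallocated to a new owner
	                                     load/store through the stale
	                                     TLB entry hits the new owner's data
	flush_tlb_page(vma, addr)            (flush arrives too late)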

I fixed this by using the new ptep_clear_flush function, introduced
with the TLB flush optimization patch for s/390.
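
The difference is that ptep_clear_flush performs the flush before the
caller can drop the last reference to the page. As a sketch, the generic
fallback is roughly the two old steps folded into one helper (an
architecture such as s/390 can instead override it with a single
instruction, IPTE, which clears the PTE and flushes the TLBs of all CPUs):

	/* Sketch of the generic fallback; s/390 overrides this. */
	#define ptep_clear_flush(__vma, __address, __ptep)		\
	({								\
		pte_t __pte = ptep_get_and_clear(__ptep);		\
		flush_tlb_page(__vma, __address);			\
		__pte;							\
	})

With the flush done inside zap_pte, the function no longer needs to report
back whether a flush is pending, which is why it can return void and the
flush variable disappears from install_page and install_file_pte below.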
parent 465235cb
@@ -19,18 +19,18 @@
 #include <asm/cacheflush.h>
 #include <asm/tlbflush.h>
 
-static inline int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
+static inline void zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 			unsigned long addr, pte_t *ptep)
 {
 	pte_t pte = *ptep;
 
 	if (pte_none(pte))
-		return 0;
+		return;
 	if (pte_present(pte)) {
 		unsigned long pfn = pte_pfn(pte);
 
 		flush_cache_page(vma, addr);
-		pte = ptep_get_and_clear(ptep);
+		pte = ptep_clear_flush(vma, addr, ptep);
 		if (pfn_valid(pfn)) {
 			struct page *page = pfn_to_page(pfn);
 			if (!PageReserved(page)) {
@@ -41,12 +41,10 @@ static inline int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 				mm->rss--;
 			}
 		}
-		return 1;
 	} else {
 		if (!pte_file(pte))
 			free_swap_and_cache(pte_to_swp_entry(pte));
 		pte_clear(ptep);
-		return 0;
 	}
 }
 
@@ -57,7 +55,7 @@ static inline int zap_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, struct page *page, pgprot_t prot)
 {
-	int err = -ENOMEM, flush;
+	int err = -ENOMEM;
 	pte_t *pte;
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -78,7 +76,7 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte)
 		goto err_unlock;
 
-	flush = zap_pte(mm, vma, addr, pte);
+	zap_pte(mm, vma, addr, pte);
 
 	mm->rss++;
 	flush_icache_page(vma, page);
@@ -86,8 +84,6 @@ int install_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	pte_chain = page_add_rmap(page, pte, pte_chain);
 	pte_val = *pte;
 	pte_unmap(pte);
-	if (flush)
-		flush_tlb_page(vma, addr);
 	update_mmu_cache(vma, addr, pte_val);
 	spin_unlock(&mm->page_table_lock);
 	pte_chain_free(pte_chain);
@@ -109,7 +105,7 @@ EXPORT_SYMBOL(install_page);
 int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 		unsigned long addr, unsigned long pgoff, pgprot_t prot)
 {
-	int err = -ENOMEM, flush;
+	int err = -ENOMEM;
 	pte_t *pte;
 	pgd_t *pgd;
 	pmd_t *pmd;
@@ -126,13 +122,11 @@ int install_file_pte(struct mm_struct *mm, struct vm_area_struct *vma,
 	if (!pte)
 		goto err_unlock;
 
-	flush = zap_pte(mm, vma, addr, pte);
+	zap_pte(mm, vma, addr, pte);
 
 	set_pte(pte, pgoff_to_pte(pgoff));
 	pte_val = *pte;
 	pte_unmap(pte);
-	if (flush)
-		flush_tlb_page(vma, addr);
 	update_mmu_cache(vma, addr, pte_val);
 	spin_unlock(&mm->page_table_lock);
 	return 0;
...