Commit 7ef93905 authored by Jeff Dike, committed by Linus Torvalds

[PATCH] uml: fix x86_64 page leak

We were leaking pmd pages when 3_LEVEL_PGTABLES was enabled.  This fixes that.
Signed-off-by: Jeff Dike <jdike@addtoit.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent f9dfefe4
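
For context on what the hunks below restore: with CONFIG_3_LEVEL_PGTABLES each mm gets its own page for the pmd, but pmd_free() was a no-op and destroy_context_skas() only freed the pte page, so the pmd page could be lost every time an mm was torn down. The following is a minimal userspace sketch of that lifecycle, not the kernel code touched by the patch; the struct and function names are illustrative stand-ins and malloc() stands in for page allocation.

/*
 * Minimal userspace sketch of the leak, assuming a simplified model of the
 * SKAS mm context; names are illustrative and this is not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>

#define MODEL_PAGE_SIZE 4096

struct mmu_context_model {
	void *last_page_table;	/* pte page; teardown already freed this */
	void *last_pmd;		/* pmd page; this is what was being leaked */
};

static void init_context(struct mmu_context_model *ctx)
{
	ctx->last_page_table = malloc(MODEL_PAGE_SIZE);
	ctx->last_pmd = malloc(MODEL_PAGE_SIZE);
}

/* Pre-patch teardown: pmd_free() was a no-op, so last_pmd was never released. */
static void destroy_context_old(struct mmu_context_model *ctx)
{
	free(ctx->last_page_table);
	/* last_pmd deliberately not freed here -- one page lost per teardown */
}

/* Post-patch teardown: the pmd page is recorded and really freed. */
static void destroy_context_new(struct mmu_context_model *ctx)
{
	free(ctx->last_page_table);
	free(ctx->last_pmd);
}

int main(void)
{
	struct mmu_context_model leaky, fixed;

	init_context(&leaky);
	destroy_context_old(&leaky);	/* models the old behaviour: leaks a block */

	init_context(&fixed);
	destroy_context_new(&fixed);	/* models the patched behaviour: no leak */

	puts("run under a leak checker to see the single lost pmd-sized block");
	return 0;
}

In the patch itself the same pairing is restored by recording the pmd page in mmu_context_skas.last_pmd when the stub page tables are set up and by making pmd_free() actually call free_page(), so destroy_context_skas() can hand the page back.
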
@@ -6,11 +6,15 @@
 #ifndef __SKAS_MMU_H
 #define __SKAS_MMU_H
 
+#include "linux/config.h"
 #include "mm_id.h"
 
 struct mmu_context_skas {
 	struct mm_id id;
 	unsigned long last_page_table;
+#ifdef CONFIG_3_LEVEL_PGTABLES
+	unsigned long last_pmd;
+#endif
 };
 
 extern void switch_mm_skas(struct mm_id * mm_idp);
...
@@ -56,6 +56,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
 	 */
 	mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
+#ifdef CONFIG_3_LEVEL_PGTABLES
+	mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
+#endif
 
 	*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
 	*pte = pte_mkexec(*pte);
@@ -144,6 +147,10 @@ void destroy_context_skas(struct mm_struct *mm)
 	if(!proc_mm || !ptrace_faultinfo){
 		free_page(mmu->id.stack);
-		free_page(mmu->last_page_table);
+		pte_free_kernel((pte_t *) mmu->last_page_table);
+		dec_page_state(nr_page_table_pages);
+#ifdef CONFIG_3_LEVEL_PGTABLES
+		pmd_free((pmd_t *) mmu->last_pmd);
+#endif
 	}
 }
...
@@ -42,11 +42,13 @@ static inline void pte_free(struct page *pte)
 #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
 
 #ifdef CONFIG_3_LEVEL_PGTABLES
-/*
- * In the 3-level case we free the pmds as part of the pgd.
- */
-#define pmd_free(x) do { } while (0)
-#define __pmd_free_tlb(tlb,x) do { } while (0)
+
+extern __inline__ void pmd_free(pmd_t *pmd)
+{
+	free_page((unsigned long)pmd);
+}
+
+#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
 #endif
 
 #define check_pgt_cache() do { } while (0)
...
@@ -69,14 +69,11 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
 	return pmd;
 }
 
-static inline void pmd_free(pmd_t *pmd){
-	free_page((unsigned long) pmd);
+extern inline void pud_clear (pud_t *pud)
+{
+	set_pud(pud, __pud(0));
 }
 
-#define __pmd_free_tlb(tlb,x) do { } while (0)
-
-static inline void pud_clear (pud_t * pud) { }
-
 #define pud_page(pud) \
 	((struct page *) __va(pud_val(pud) & PAGE_MASK))
...