Commit 38e00d34 authored by Paolo 'Blaisorblade' Giarrusso's avatar Paolo 'Blaisorblade' Giarrusso Committed by Chris Wright

[PATCH] uml - Fix x86_64 page leak

We were leaking pmd pages when 3_LEVEL_PGTABLES was enabled. This patch fixes that;
it has been well tested and is included in the mainline tree. Please include it in
-stable as well.
Signed-off-by: default avatarJeff Dike <jdike@addtoit.com>
Signed-off-by: default avatarPaolo 'Blaisorblade' Giarrusso <blaisorblade@yahoo.it>
Signed-off-by: default avatarChris Wright <chrisw@osdl.org>
parent 17d6b7c8
...@@ -6,11 +6,15 @@ ...@@ -6,11 +6,15 @@
#ifndef __SKAS_MMU_H #ifndef __SKAS_MMU_H
#define __SKAS_MMU_H #define __SKAS_MMU_H
#include "linux/config.h"
#include "mm_id.h" #include "mm_id.h"
struct mmu_context_skas { struct mmu_context_skas {
struct mm_id id; struct mm_id id;
unsigned long last_page_table; unsigned long last_page_table;
#ifdef CONFIG_3_LEVEL_PGTABLES
unsigned long last_pmd;
#endif
}; };
extern void switch_mm_skas(struct mm_id * mm_idp); extern void switch_mm_skas(struct mm_id * mm_idp);
......
...@@ -56,6 +56,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc, ...@@ -56,6 +56,9 @@ static int init_stub_pte(struct mm_struct *mm, unsigned long proc,
*/ */
mm->context.skas.last_page_table = pmd_page_kernel(*pmd); mm->context.skas.last_page_table = pmd_page_kernel(*pmd);
#ifdef CONFIG_3_LEVEL_PGTABLES
mm->context.skas.last_pmd = (unsigned long) __va(pud_val(*pud));
#endif
*pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT)); *pte = mk_pte(virt_to_page(kernel), __pgprot(_PAGE_PRESENT));
*pte = pte_mkexec(*pte); *pte = pte_mkexec(*pte);
...@@ -140,6 +143,10 @@ void destroy_context_skas(struct mm_struct *mm) ...@@ -140,6 +143,10 @@ void destroy_context_skas(struct mm_struct *mm)
else { else {
os_kill_ptraced_process(mmu->id.u.pid, 1); os_kill_ptraced_process(mmu->id.u.pid, 1);
free_page(mmu->id.stack); free_page(mmu->id.stack);
free_page(mmu->last_page_table); pte_free_kernel((pte_t *) mmu->last_page_table);
dec_page_state(nr_page_table_pages);
#ifdef CONFIG_3_LEVEL_PGTABLES
pmd_free((pmd_t *) mmu->last_pmd);
#endif
} }
} }
...@@ -42,11 +42,13 @@ static inline void pte_free(struct page *pte) ...@@ -42,11 +42,13 @@ static inline void pte_free(struct page *pte)
#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte)) #define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
#ifdef CONFIG_3_LEVEL_PGTABLES #ifdef CONFIG_3_LEVEL_PGTABLES
/*
* In the 3-level case we free the pmds as part of the pgd. extern __inline__ void pmd_free(pmd_t *pmd)
*/ {
#define pmd_free(x) do { } while (0) free_page((unsigned long)pmd);
#define __pmd_free_tlb(tlb,x) do { } while (0) }
#define __pmd_free_tlb(tlb,x) tlb_remove_page((tlb),virt_to_page(x))
#endif #endif
#define check_pgt_cache() do { } while (0) #define check_pgt_cache() do { } while (0)
......
...@@ -98,14 +98,11 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address) ...@@ -98,14 +98,11 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
return pmd; return pmd;
} }
static inline void pmd_free(pmd_t *pmd){ extern inline void pud_clear (pud_t *pud)
free_page((unsigned long) pmd); {
set_pud(pud, __pud(0));
} }
#define __pmd_free_tlb(tlb,x) do { } while (0)
static inline void pud_clear (pud_t * pud) { }
#define pud_page(pud) \ #define pud_page(pud) \
((struct page *) __va(pud_val(pud) & PAGE_MASK)) ((struct page *) __va(pud_val(pud) & PAGE_MASK))
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment