Commit c728252c authored by Arjan van de Ven; committed by Linus Torvalds

[PATCH] x86/x86_64: mark rodata section read only: generic x86-64 bugfix

Bug fix required for the .rodata work on x86-64:

when change_page_attr() and friends need to break up a 2Mb page into 4Kb
pages, it always sets the NX bit on the PMD, which causes the CPU to consider
the entire 2Mb region to be NX regardless of the actual PTE perms.  This is
fine in general, with one big exception: the 2Mb page that covers the last
part of the kernel .text!  The fix is to not invent a new permission for the
new PMD entry, but to just inherit the existing one minus the PSE bit.
Signed-off-by: Arjan van de Ven <arjan@infradead.org>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Cc: Andi Kleen <ak@muc.de>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
parent 63aaf308
...@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, ...@@ -128,6 +128,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
pte_t *kpte; pte_t *kpte;
struct page *kpte_page; struct page *kpte_page;
unsigned kpte_flags; unsigned kpte_flags;
pgprot_t ref_prot2;
kpte = lookup_address(address); kpte = lookup_address(address);
if (!kpte) return 0; if (!kpte) return 0;
kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK); kpte_page = virt_to_page(((unsigned long)kpte) & PAGE_MASK);
...@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, ...@@ -140,10 +141,14 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
* split_large_page will take the reference for this change_page_attr * split_large_page will take the reference for this change_page_attr
* on the split page. * on the split page.
*/ */
struct page *split = split_large_page(address, prot, ref_prot);
struct page *split;
ref_prot2 = __pgprot(pgprot_val(pte_pgprot(*lookup_address(address))) & ~(1<<_PAGE_BIT_PSE));
split = split_large_page(address, prot, ref_prot2);
if (!split) if (!split)
return -ENOMEM; return -ENOMEM;
set_pte(kpte,mk_pte(split, ref_prot)); set_pte(kpte,mk_pte(split, ref_prot2));
kpte_page = split; kpte_page = split;
} }
get_page(kpte_page); get_page(kpte_page);
......
...@@ -122,6 +122,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long ...@@ -122,6 +122,8 @@ static inline pte_t ptep_get_and_clear_full(struct mm_struct *mm, unsigned long
#define pte_same(a, b) ((a).pte == (b).pte) #define pte_same(a, b) ((a).pte == (b).pte)
#define pte_pgprot(a) (__pgprot((a).pte & ~PHYSICAL_PAGE_MASK))
#define PMD_SIZE (1UL << PMD_SHIFT) #define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1)) #define PMD_MASK (~(PMD_SIZE-1))
#define PUD_SIZE (1UL << PUD_SHIFT) #define PUD_SIZE (1UL << PUD_SHIFT)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment