Commit e3ebadd9 authored by Linus Torvalds

Revert "[PATCH] x86: __pa and __pa_symbol address space separation"

This was broken.  It adds complexity, for no good reason.  Rather than
separate __pa() and __pa_symbol(), we should deprecate __pa_symbol(),
and preferably __pa() too - and just use "virt_to_phys()" instead, which
is more readable and has nicer semantics.

However, right now, just undo the separation, and make __pa_symbol() be
the exact same as __pa().  That fixes the bugs this patch introduced,
and we can do the fairly obvious cleanups later.

Do the new __phys_addr() function (which is now the actual workhorse for
the unified __pa()/__pa_symbol()) as a real external function, that way
all the potential issues with compile/link-time optimizations of
constant symbol addresses go away, and we can also, if we choose to, add
more sanity-checking of the argument.

Cc: Eric W. Biederman <ebiederm@xmission.com>
Cc: Vivek Goyal <vgoyal@in.ibm.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 15700770
...@@ -390,8 +390,8 @@ void __init alternative_instructions(void) ...@@ -390,8 +390,8 @@ void __init alternative_instructions(void)
_text, _etext); _text, _etext);
} }
free_init_pages("SMP alternatives", free_init_pages("SMP alternatives",
__pa_symbol(&__smp_locks), (unsigned long)__smp_locks,
__pa_symbol(&__smp_locks_end)); (unsigned long)__smp_locks_end);
} else { } else {
alternatives_smp_module_add(NULL, "core kernel", alternatives_smp_module_add(NULL, "core kernel",
__smp_locks, __smp_locks_end, __smp_locks, __smp_locks_end,
......
...@@ -843,11 +843,10 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) ...@@ -843,11 +843,10 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
unsigned long addr; unsigned long addr;
for (addr = begin; addr < end; addr += PAGE_SIZE) { for (addr = begin; addr < end; addr += PAGE_SIZE) {
struct page *page = pfn_to_page(addr >> PAGE_SHIFT); ClearPageReserved(virt_to_page(addr));
ClearPageReserved(page); init_page_count(virt_to_page(addr));
init_page_count(page); memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE); free_page(addr);
__free_page(page);
totalram_pages++; totalram_pages++;
} }
printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
...@@ -856,14 +855,14 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) ...@@ -856,14 +855,14 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
void free_initmem(void) void free_initmem(void)
{ {
free_init_pages("unused kernel memory", free_init_pages("unused kernel memory",
__pa_symbol(&__init_begin), (unsigned long)(&__init_begin),
__pa_symbol(&__init_end)); (unsigned long)(&__init_end));
} }
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end) void free_initrd_mem(unsigned long start, unsigned long end)
{ {
free_init_pages("initrd memory", __pa(start), __pa(end)); free_init_pages("initrd memory", start, end);
} }
#endif #endif
...@@ -189,21 +189,21 @@ NORET_TYPE void machine_kexec(struct kimage *image) ...@@ -189,21 +189,21 @@ NORET_TYPE void machine_kexec(struct kimage *image)
control_page = page_address(image->control_code_page) + PAGE_SIZE; control_page = page_address(image->control_code_page) + PAGE_SIZE;
memcpy(control_page, relocate_kernel, PAGE_SIZE); memcpy(control_page, relocate_kernel, PAGE_SIZE);
page_list[PA_CONTROL_PAGE] = __pa(control_page); page_list[PA_CONTROL_PAGE] = virt_to_phys(control_page);
page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel; page_list[VA_CONTROL_PAGE] = (unsigned long)relocate_kernel;
page_list[PA_PGD] = __pa_symbol(&kexec_pgd); page_list[PA_PGD] = virt_to_phys(&kexec_pgd);
page_list[VA_PGD] = (unsigned long)kexec_pgd; page_list[VA_PGD] = (unsigned long)kexec_pgd;
page_list[PA_PUD_0] = __pa_symbol(&kexec_pud0); page_list[PA_PUD_0] = virt_to_phys(&kexec_pud0);
page_list[VA_PUD_0] = (unsigned long)kexec_pud0; page_list[VA_PUD_0] = (unsigned long)kexec_pud0;
page_list[PA_PMD_0] = __pa_symbol(&kexec_pmd0); page_list[PA_PMD_0] = virt_to_phys(&kexec_pmd0);
page_list[VA_PMD_0] = (unsigned long)kexec_pmd0; page_list[VA_PMD_0] = (unsigned long)kexec_pmd0;
page_list[PA_PTE_0] = __pa_symbol(&kexec_pte0); page_list[PA_PTE_0] = virt_to_phys(&kexec_pte0);
page_list[VA_PTE_0] = (unsigned long)kexec_pte0; page_list[VA_PTE_0] = (unsigned long)kexec_pte0;
page_list[PA_PUD_1] = __pa_symbol(&kexec_pud1); page_list[PA_PUD_1] = virt_to_phys(&kexec_pud1);
page_list[VA_PUD_1] = (unsigned long)kexec_pud1; page_list[VA_PUD_1] = (unsigned long)kexec_pud1;
page_list[PA_PMD_1] = __pa_symbol(&kexec_pmd1); page_list[PA_PMD_1] = virt_to_phys(&kexec_pmd1);
page_list[VA_PMD_1] = (unsigned long)kexec_pmd1; page_list[VA_PMD_1] = (unsigned long)kexec_pmd1;
page_list[PA_PTE_1] = __pa_symbol(&kexec_pte1); page_list[PA_PTE_1] = virt_to_phys(&kexec_pte1);
page_list[VA_PTE_1] = (unsigned long)kexec_pte1; page_list[VA_PTE_1] = (unsigned long)kexec_pte1;
page_list[PA_TABLE_PAGE] = page_list[PA_TABLE_PAGE] =
......
...@@ -245,12 +245,11 @@ void __init setup_arch(char **cmdline_p) ...@@ -245,12 +245,11 @@ void __init setup_arch(char **cmdline_p)
init_mm.end_code = (unsigned long) &_etext; init_mm.end_code = (unsigned long) &_etext;
init_mm.end_data = (unsigned long) &_edata; init_mm.end_data = (unsigned long) &_edata;
init_mm.brk = (unsigned long) &_end; init_mm.brk = (unsigned long) &_end;
init_mm.pgd = __va(__pa_symbol(&init_level4_pgt));
code_resource.start = __pa_symbol(&_text); code_resource.start = virt_to_phys(&_text);
code_resource.end = __pa_symbol(&_etext)-1; code_resource.end = virt_to_phys(&_etext)-1;
data_resource.start = __pa_symbol(&_etext); data_resource.start = virt_to_phys(&_etext);
data_resource.end = __pa_symbol(&_edata)-1; data_resource.end = virt_to_phys(&_edata)-1;
early_identify_cpu(&boot_cpu_data); early_identify_cpu(&boot_cpu_data);
......
...@@ -76,7 +76,7 @@ static inline void leave_mm(int cpu) ...@@ -76,7 +76,7 @@ static inline void leave_mm(int cpu)
if (read_pda(mmu_state) == TLBSTATE_OK) if (read_pda(mmu_state) == TLBSTATE_OK)
BUG(); BUG();
cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask); cpu_clear(cpu, read_pda(active_mm)->cpu_vm_mask);
load_cr3(init_mm.pgd); load_cr3(swapper_pg_dir);
} }
/* /*
......
...@@ -572,13 +572,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) ...@@ -572,13 +572,13 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10); printk(KERN_INFO "Freeing %s: %luk freed\n", what, (end - begin) >> 10);
for (addr = begin; addr < end; addr += PAGE_SIZE) { for (addr = begin; addr < end; addr += PAGE_SIZE) {
struct page *page = pfn_to_page(addr >> PAGE_SHIFT); ClearPageReserved(virt_to_page(addr));
ClearPageReserved(page); init_page_count(virt_to_page(addr));
init_page_count(page); memset((void *)(addr & ~(PAGE_SIZE-1)),
memset(page_address(page), POISON_FREE_INITMEM, PAGE_SIZE); POISON_FREE_INITMEM, PAGE_SIZE);
if (addr >= __START_KERNEL_map) if (addr >= __START_KERNEL_map)
change_page_attr_addr(addr, 1, __pgprot(0)); change_page_attr_addr(addr, 1, __pgprot(0));
__free_page(page); free_page(addr);
totalram_pages++; totalram_pages++;
} }
if (addr > __START_KERNEL_map) if (addr > __START_KERNEL_map)
...@@ -588,26 +588,31 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end) ...@@ -588,26 +588,31 @@ void free_init_pages(char *what, unsigned long begin, unsigned long end)
void free_initmem(void) void free_initmem(void)
{ {
free_init_pages("unused kernel memory", free_init_pages("unused kernel memory",
__pa_symbol(&__init_begin), (unsigned long)(&__init_begin),
__pa_symbol(&__init_end)); (unsigned long)(&__init_end));
} }
#ifdef CONFIG_DEBUG_RODATA #ifdef CONFIG_DEBUG_RODATA
void mark_rodata_ro(void) void mark_rodata_ro(void)
{ {
unsigned long start = PFN_ALIGN(__va(__pa_symbol(&_stext))), size; unsigned long start = (unsigned long)_stext, end;
#ifdef CONFIG_HOTPLUG_CPU #ifdef CONFIG_HOTPLUG_CPU
/* It must still be possible to apply SMP alternatives. */ /* It must still be possible to apply SMP alternatives. */
if (num_possible_cpus() > 1) if (num_possible_cpus() > 1)
start = PFN_ALIGN(__va(__pa_symbol(&_etext))); start = (unsigned long)_etext;
#endif #endif
size = (unsigned long)__va(__pa_symbol(&__end_rodata)) - start; end = (unsigned long)__end_rodata;
change_page_attr_addr(start, size >> PAGE_SHIFT, PAGE_KERNEL_RO); start = (start + PAGE_SIZE - 1) & PAGE_MASK;
end &= PAGE_MASK;
if (end <= start)
return;
change_page_attr_addr(start, (end - start) >> PAGE_SHIFT, PAGE_KERNEL_RO);
printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n", printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
size >> 10); (end - start) >> 10);
/* /*
* change_page_attr_addr() requires a global_flush_tlb() call after it. * change_page_attr_addr() requires a global_flush_tlb() call after it.
...@@ -622,7 +627,7 @@ void mark_rodata_ro(void) ...@@ -622,7 +627,7 @@ void mark_rodata_ro(void)
#ifdef CONFIG_BLK_DEV_INITRD #ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end) void free_initrd_mem(unsigned long start, unsigned long end)
{ {
free_init_pages("initrd memory", __pa(start), __pa(end)); free_init_pages("initrd memory", start, end);
} }
#endif #endif
......
...@@ -13,12 +13,21 @@ ...@@ -13,12 +13,21 @@
#include <linux/slab.h> #include <linux/slab.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/io.h> #include <linux/io.h>
#include <asm/pgalloc.h> #include <asm/pgalloc.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
#include <asm/tlbflush.h> #include <asm/tlbflush.h>
#include <asm/cacheflush.h> #include <asm/cacheflush.h>
#include <asm/proto.h> #include <asm/proto.h>
unsigned long __phys_addr(unsigned long x)
{
if (x >= __START_KERNEL_map)
return x - __START_KERNEL_map + phys_base;
return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
#define ISA_START_ADDRESS 0xa0000 #define ISA_START_ADDRESS 0xa0000
#define ISA_END_ADDRESS 0x100000 #define ISA_END_ADDRESS 0x100000
......
...@@ -51,6 +51,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot, ...@@ -51,6 +51,7 @@ static struct page *split_large_page(unsigned long address, pgprot_t prot,
SetPagePrivate(base); SetPagePrivate(base);
page_private(base) = 0; page_private(base) = 0;
address = __pa(address);
addr = address & LARGE_PAGE_MASK; addr = address & LARGE_PAGE_MASK;
pbase = (pte_t *)page_address(base); pbase = (pte_t *)page_address(base);
for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) { for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
...@@ -100,12 +101,13 @@ static inline void save_page(struct page *fpage) ...@@ -100,12 +101,13 @@ static inline void save_page(struct page *fpage)
* No more special protections in this 2/4MB area - revert to a * No more special protections in this 2/4MB area - revert to a
* large page again. * large page again.
*/ */
static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_prot) static void revert_page(unsigned long address, pgprot_t ref_prot)
{ {
pgd_t *pgd; pgd_t *pgd;
pud_t *pud; pud_t *pud;
pmd_t *pmd; pmd_t *pmd;
pte_t large_pte; pte_t large_pte;
unsigned long pfn;
pgd = pgd_offset_k(address); pgd = pgd_offset_k(address);
BUG_ON(pgd_none(*pgd)); BUG_ON(pgd_none(*pgd));
...@@ -113,6 +115,7 @@ static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_p ...@@ -113,6 +115,7 @@ static void revert_page(unsigned long address, unsigned long pfn, pgprot_t ref_p
BUG_ON(pud_none(*pud)); BUG_ON(pud_none(*pud));
pmd = pmd_offset(pud, address); pmd = pmd_offset(pud, address);
BUG_ON(pmd_val(*pmd) & _PAGE_PSE); BUG_ON(pmd_val(*pmd) & _PAGE_PSE);
pfn = (__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT;
large_pte = pfn_pte(pfn, ref_prot); large_pte = pfn_pte(pfn, ref_prot);
large_pte = pte_mkhuge(large_pte); large_pte = pte_mkhuge(large_pte);
set_pte((pte_t *)pmd, large_pte); set_pte((pte_t *)pmd, large_pte);
...@@ -138,8 +141,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, ...@@ -138,8 +141,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
*/ */
struct page *split; struct page *split;
ref_prot2 = pte_pgprot(pte_clrhuge(*kpte)); ref_prot2 = pte_pgprot(pte_clrhuge(*kpte));
split = split_large_page(pfn << PAGE_SHIFT, prot, split = split_large_page(address, prot, ref_prot2);
ref_prot2);
if (!split) if (!split)
return -ENOMEM; return -ENOMEM;
set_pte(kpte, mk_pte(split, ref_prot2)); set_pte(kpte, mk_pte(split, ref_prot2));
...@@ -158,7 +160,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, ...@@ -158,7 +160,7 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
if (page_private(kpte_page) == 0) { if (page_private(kpte_page) == 0) {
save_page(kpte_page); save_page(kpte_page);
revert_page(address, pfn, ref_prot); revert_page(address, ref_prot);
} }
return 0; return 0;
} }
...@@ -178,7 +180,6 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot, ...@@ -178,7 +180,6 @@ __change_page_attr(unsigned long address, unsigned long pfn, pgprot_t prot,
*/ */
int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot) int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
{ {
unsigned long phys_base_pfn = __pa_symbol(__START_KERNEL_map) >> PAGE_SHIFT;
int err = 0, kernel_map = 0; int err = 0, kernel_map = 0;
int i; int i;
...@@ -199,11 +200,10 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot) ...@@ -199,11 +200,10 @@ int change_page_attr_addr(unsigned long address, int numpages, pgprot_t prot)
} }
/* Handle kernel mapping too which aliases part of the /* Handle kernel mapping too which aliases part of the
* lowmem */ * lowmem */
if ((pfn >= phys_base_pfn) && if (__pa(address) < KERNEL_TEXT_SIZE) {
((pfn - phys_base_pfn) < (KERNEL_TEXT_SIZE >> PAGE_SHIFT))) {
unsigned long addr2; unsigned long addr2;
pgprot_t prot2; pgprot_t prot2;
addr2 = __START_KERNEL_map + ((pfn - phys_base_pfn) << PAGE_SHIFT); addr2 = __START_KERNEL_map + __pa(address);
/* Make sure the kernel mappings stay executable */ /* Make sure the kernel mappings stay executable */
prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot))); prot2 = pte_pgprot(pte_mkexec(pfn_pte(0, prot)));
err = __change_page_attr(addr2, pfn, prot2, err = __change_page_attr(addr2, pfn, prot2,
......
...@@ -94,26 +94,22 @@ extern unsigned long phys_base; ...@@ -94,26 +94,22 @@ extern unsigned long phys_base;
#define KERNEL_TEXT_SIZE (40*1024*1024) #define KERNEL_TEXT_SIZE (40*1024*1024)
#define KERNEL_TEXT_START 0xffffffff80000000 #define KERNEL_TEXT_START 0xffffffff80000000
#define PAGE_OFFSET __PAGE_OFFSET
#ifndef __ASSEMBLY__ #ifndef __ASSEMBLY__
#include <asm/bug.h> #include <asm/bug.h>
#endif /* __ASSEMBLY__ */ extern unsigned long __phys_addr(unsigned long);
#define PAGE_OFFSET __PAGE_OFFSET #endif /* __ASSEMBLY__ */
/* Note: __pa(&symbol_visible_to_c) should be always replaced with __pa_symbol. #define __pa(x) __phys_addr((unsigned long)(x))
Otherwise you risk miscompilation. */ #define __pa_symbol(x) __phys_addr((unsigned long)(x))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
/* __pa_symbol should be used for C visible symbols.
This seems to be the official gcc blessed way to do such arithmetic. */
#define __pa_symbol(x) \
({unsigned long v; \
asm("" : "=r" (v) : "0" (x)); \
((v - __START_KERNEL_map) + phys_base); })
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET)) #define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define __boot_va(x) __va(x)
#define __boot_pa(x) __pa(x)
#ifdef CONFIG_FLATMEM #ifdef CONFIG_FLATMEM
#define pfn_valid(pfn) ((pfn) < end_pfn) #define pfn_valid(pfn) ((pfn) < end_pfn)
#endif #endif
......
...@@ -19,7 +19,7 @@ extern pmd_t level2_kernel_pgt[512]; ...@@ -19,7 +19,7 @@ extern pmd_t level2_kernel_pgt[512];
extern pgd_t init_level4_pgt[]; extern pgd_t init_level4_pgt[];
extern unsigned long __supported_pte_mask; extern unsigned long __supported_pte_mask;
#define swapper_pg_dir ((pgd_t *)NULL) #define swapper_pg_dir init_level4_pgt
extern void paging_init(void); extern void paging_init(void);
extern void clear_kernel_mapping(unsigned long addr, unsigned long size); extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
...@@ -29,7 +29,7 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size); ...@@ -29,7 +29,7 @@ extern void clear_kernel_mapping(unsigned long addr, unsigned long size);
* for zero-mapped memory areas etc.. * for zero-mapped memory areas etc..
*/ */
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)]; extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (pfn_to_page(__pa_symbol(&empty_zero_page) >> PAGE_SHIFT)) #define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* !__ASSEMBLY__ */ #endif /* !__ASSEMBLY__ */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment