Commit 1982269a authored by Linus Torvalds

Merge branch 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 mm changes from Ingo Molnar:
 "Misc improvements:

   - Fix /proc/mtrr reporting
   - Fix ioremap printout
   - Remove the unused pvclock fixmap entry on 32-bit
   - misc cleanups"

* 'x86-mm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/ioremap: Correct function name output
  x86: Fix /proc/mtrr with base/size more than 44bits
  ix86: Don't waste fixmap entries
  x86/mm: Drop unneeded include <asm/*pgtable, page*_types.h>
  x86_64: Correct phys_addr in cleanup_highmap comment
parents fdd78889 4f4319a0
@@ -27,8 +27,6 @@
 #include <linux/init.h>
 #include <linux/linkage.h>
 #include <asm/segment.h>
-#include <asm/pgtable_types.h>
-#include <asm/page_types.h>
 #include <asm/boot.h>
 #include <asm/msr.h>
 #include <asm/processor-flags.h>
...
@@ -81,10 +81,10 @@ enum fixed_addresses {
                             + ((VSYSCALL_END-VSYSCALL_START) >> PAGE_SHIFT) - 1,
         VVAR_PAGE,
         VSYSCALL_HPET,
-#endif
 #ifdef CONFIG_PARAVIRT_CLOCK
         PVCLOCK_FIXMAP_BEGIN,
         PVCLOCK_FIXMAP_END = PVCLOCK_FIXMAP_BEGIN+PVCLOCK_VSYSCALL_NR_PAGES-1,
+#endif
 #endif
         FIX_DBGP_BASE,
         FIX_EARLYCON_MEM_BASE,
...
@@ -510,8 +510,9 @@ generic_get_free_region(unsigned long base, unsigned long size, int replace_reg)
 static void generic_get_mtrr(unsigned int reg, unsigned long *base,
                              unsigned long *size, mtrr_type *type)
 {
-        unsigned int mask_lo, mask_hi, base_lo, base_hi;
-        unsigned int tmp, hi;
+        u32 mask_lo, mask_hi, base_lo, base_hi;
+        unsigned int hi;
+        u64 tmp, mask;
 
         /*
          * get_mtrr doesn't need to update mtrr_state, also it could be called
@@ -532,18 +533,18 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
         rdmsr(MTRRphysBase_MSR(reg), base_lo, base_hi);
 
         /* Work out the shifted address mask: */
-        tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
-        mask_lo = size_or_mask | tmp;
+        tmp = (u64)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
+        mask = size_or_mask | tmp;
 
         /* Expand tmp with high bits to all 1s: */
-        hi = fls(tmp);
+        hi = fls64(tmp);
         if (hi > 0) {
-                tmp |= ~((1<<(hi - 1)) - 1);
+                tmp |= ~((1ULL<<(hi - 1)) - 1);
 
-                if (tmp != mask_lo) {
+                if (tmp != mask) {
                         printk(KERN_WARNING "mtrr: your BIOS has configured an incorrect mask, fixing it.\n");
                         add_taint(TAINT_FIRMWARE_WORKAROUND, LOCKDEP_STILL_OK);
-                        mask_lo = tmp;
+                        mask = tmp;
                 }
         }
@@ -551,8 +552,8 @@ static void generic_get_mtrr(unsigned int reg, unsigned long *base,
          * This works correctly if size is a power of two, i.e. a
          * contiguous range:
          */
-        *size = -mask_lo;
-        *base = base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
+        *size = -mask;
+        *base = (u64)base_hi << (32 - PAGE_SHIFT) | base_lo >> PAGE_SHIFT;
         *type = base_lo & 0xff;
 
 out_put_cpu:
...
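The generic_get_mtrr() change above widens the temporary mask arithmetic to 64 bits. mask_hi holds bits 32..63 of the MTRRphysMask MSR, and shifting it left by (32 - PAGE_SHIFT) in a 32-bit variable silently drops every physical-address bit at or above 44 (= 32 + PAGE_SHIFT), which is the /proc/mtrr breakage named in the pull request. A standalone userspace sketch with made-up MSR halves (not kernel code) shows the truncation:

/*
 * Sketch only: a CPU reporting 48 physical address bits.  mask_hi carries
 * mask bits 32..63, mask_lo carries bits 0..31 (bit 11 is the valid flag).
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

int main(void)
{
        uint32_t mask_hi = 0x0000ffff;   /* mask bits 32..47 set */
        uint32_t mask_lo = 0xfffff800;   /* mask bits 12..31 set, plus the valid bit */

        uint32_t old_tmp = mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;
        uint64_t new_tmp = (uint64_t)mask_hi << (32 - PAGE_SHIFT) | mask_lo >> PAGE_SHIFT;

        printf("old 32-bit tmp: 0x%08x\n", old_tmp);                       /* 0xffffffff, high PFN bits lost */
        printf("new 64-bit tmp: 0x%09llx\n", (unsigned long long)new_tmp); /* 0xfffffffff, all bits kept */
        return 0;
}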
@@ -305,7 +305,8 @@ int mtrr_add_page(unsigned long base, unsigned long size,
                 return -EINVAL;
         }
 
-        if (base & size_or_mask || size & size_or_mask) {
+        if ((base | (base + size - 1)) >>
+            (boot_cpu_data.x86_phys_bits - PAGE_SHIFT)) {
                 pr_warning("mtrr: base or size exceeds the MTRR width\n");
                 return -EINVAL;
         }
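In the mtrr_add_page() hunk just above, base and size are page counts; the rewritten test rejects a range whose first or last page-frame number needs more bits than boot_cpu_data.x86_phys_bits allows, instead of masking with size_or_mask. A minimal sketch of that check with hypothetical values (not kernel code; phys_bits stands in for x86_phys_bits):

#include <stdio.h>

#define PAGE_SHIFT 12

/* Returns 1 if [base, base + size) in pages fits into phys_bits of address. */
static int range_fits(unsigned long long base, unsigned long long size,
                      unsigned int phys_bits)
{
        /* highest page-frame number in the range is base + size - 1 */
        return ((base | (base + size - 1)) >> (phys_bits - PAGE_SHIFT)) == 0;
}

int main(void)
{
        /* a 4 GiB region starting at 16 TiB, expressed in 4 KiB pages */
        unsigned long long base = 0x100000000000ULL >> PAGE_SHIFT;
        unsigned long long size = 0x100000000ULL >> PAGE_SHIFT;

        printf("fits with 46 phys bits: %d\n", range_fits(base, size, 46)); /* 1 */
        printf("fits with 36 phys bits: %d\n", range_fits(base, size, 36)); /* 0 */
        return 0;
}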
@@ -583,6 +584,7 @@ static struct syscore_ops mtrr_syscore_ops = {
 int __initdata changed_by_mtrr_cleanup;
 
+#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))
 /**
  * mtrr_bp_init - initialize mtrrs on the boot CPU
  *
@@ -600,7 +602,7 @@ void __init mtrr_bp_init(void)
         if (cpu_has_mtrr) {
                 mtrr_if = &generic_mtrr_ops;
-                size_or_mask = 0xff000000; /* 36 bits */
+                size_or_mask = SIZE_OR_MASK_BITS(36);
                 size_and_mask = 0x00f00000;
                 phys_addr = 36;
@@ -619,7 +621,7 @@ void __init mtrr_bp_init(void)
                      boot_cpu_data.x86_mask == 0x4))
                         phys_addr = 36;
 
-                size_or_mask = ~((1ULL << (phys_addr - PAGE_SHIFT)) - 1);
+                size_or_mask = SIZE_OR_MASK_BITS(phys_addr);
                 size_and_mask = ~size_or_mask & 0xfffff00000ULL;
         } else if (boot_cpu_data.x86_vendor == X86_VENDOR_CENTAUR &&
                    boot_cpu_data.x86 == 6) {
@@ -627,7 +629,7 @@ void __init mtrr_bp_init(void)
                  * VIA C* family have Intel style MTRRs,
                  * but don't support PAE
                  */
-                size_or_mask = 0xfff00000; /* 32 bits */
+                size_or_mask = SIZE_OR_MASK_BITS(32);
                 size_and_mask = 0;
                 phys_addr = 32;
         }
@@ -637,21 +639,21 @@ void __init mtrr_bp_init(void)
                 if (cpu_has_k6_mtrr) {
                         /* Pre-Athlon (K6) AMD CPU MTRRs */
                         mtrr_if = mtrr_ops[X86_VENDOR_AMD];
-                        size_or_mask = 0xfff00000; /* 32 bits */
+                        size_or_mask = SIZE_OR_MASK_BITS(32);
                         size_and_mask = 0;
                 }
                 break;
         case X86_VENDOR_CENTAUR:
                 if (cpu_has_centaur_mcr) {
                         mtrr_if = mtrr_ops[X86_VENDOR_CENTAUR];
-                        size_or_mask = 0xfff00000; /* 32 bits */
+                        size_or_mask = SIZE_OR_MASK_BITS(32);
                         size_and_mask = 0;
                 }
                 break;
         case X86_VENDOR_CYRIX:
                 if (cpu_has_cyrix_arr) {
                         mtrr_if = mtrr_ops[X86_VENDOR_CYRIX];
-                        size_or_mask = 0xfff00000; /* 32 bits */
+                        size_or_mask = SIZE_OR_MASK_BITS(32);
                         size_and_mask = 0;
                 }
                 break;
...
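The SIZE_OR_MASK_BITS() helper introduced above replaces the hard-coded 32-bit literals (0xff000000 for 36 bits, 0xfff00000 for 32 bits), which were only the low halves of the intended 64-bit masks. For a physical address width n it sets every bit that cannot appear in an n-bit page-frame number. A quick check of what it expands to, assuming PAGE_SHIFT is 12 (not kernel code):

#include <stdio.h>

#define PAGE_SHIFT 12
#define SIZE_OR_MASK_BITS(n) (~((1ULL << ((n) - PAGE_SHIFT)) - 1))

int main(void)
{
        printf("32 bits: 0x%016llx\n", SIZE_OR_MASK_BITS(32)); /* 0xfffffffffff00000, low half 0xfff00000 */
        printf("36 bits: 0x%016llx\n", SIZE_OR_MASK_BITS(36)); /* 0xffffffffff000000, low half 0xff000000 */
        printf("44 bits: 0x%016llx\n", SIZE_OR_MASK_BITS(44)); /* 0xffffffff00000000, low half 0x00000000 */
        return 0;
}

The 44-bit row shows where the old literals broke down: once the width reaches 44 bits (32 + PAGE_SHIFT), the low 32 bits of the mask are all zero, so a 32-bit value can no longer represent it, which is what the /proc/mtrr fix in this branch addresses.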
@@ -368,7 +368,7 @@ void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
  *
  * from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
  *
- * phys_addr holds the negative offset to the kernel, which is added
+ * phys_base holds the negative offset to the kernel, which is added
  * to the compile time generated pmds. This results in invalid pmds up
  * to the point where we hit the physaddr 0 mapping.
  *
...
@@ -501,15 +501,15 @@ __early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
         }
 
         if (slot < 0) {
-                printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
-                       (u64)phys_addr, size);
+                printk(KERN_INFO "%s(%08llx, %08lx) not found slot\n",
+                       __func__, (u64)phys_addr, size);
                 WARN_ON(1);
                 return NULL;
         }
 
         if (early_ioremap_debug) {
-                printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
-                       (u64)phys_addr, size, slot);
+                printk(KERN_INFO "%s(%08llx, %08lx) [%d] => ",
+                       __func__, (u64)phys_addr, size, slot);
                 dump_stack();
         }
...
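The last hunk replaces hard-coded function names in __early_ioremap()'s messages (one of them misspelled as "early_iomap") with "%s" and __func__, so the output always matches the function it comes from. A small illustration of the pattern, using printf as a stand-in for printk and a hypothetical function name (not kernel code):

#include <stdio.h>

static void early_slot_lookup(unsigned long long phys_addr, unsigned long size)
{
        /* __func__ expands to "early_slot_lookup" and tracks any later rename */
        printf("%s(%08llx, %08lx) not found slot\n", __func__, phys_addr, size);
}

int main(void)
{
        early_slot_lookup(0xfed00000ULL, 0x1000UL);
        return 0;
}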