Commit 505569d2 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Misc fixes: two vdso fixes, two kbuild fixes and a boot failure fix
  with certain odd memory mappings"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86, vdso: Use asm volatile in __getcpu
  x86/build: Clean auto-generated processor feature files
  x86: Fix mkcapflags.sh bash-ism
  x86: Fix step size adjustment during initial memory mapping
  x86_64, vdso: Fix the vdso address randomization algorithm
parents 5ab551d6 2aba73a6
@@ -51,6 +51,7 @@ targets += cpustr.h
 $(obj)/cpustr.h: $(obj)/mkcpustr FORCE
         $(call if_changed,cpustr)
 endif
+clean-files += cpustr.h
 # ---------------------------------------------------------------------------
...
@@ -80,9 +80,11 @@ static inline unsigned int __getcpu(void)
         /*
          * Load per CPU data from GDT.  LSL is faster than RDTSCP and
-         * works on all CPUs.
+         * works on all CPUs.  This is volatile so that it orders
+         * correctly wrt barrier() and to keep gcc from cleverly
+         * hoisting it out of the calling function.
          */
-        asm("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
+        asm volatile ("lsl %1,%0" : "=r" (p) : "r" (__PER_CPU_SEG));
         return p;
 }
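The new comment is the whole point of this hunk: a plain asm statement with outputs is treated by GCC as a pure function of its inputs, so it may be CSE'd or hoisted across barrier(). Below is a minimal userspace sketch of that hazard, not part of the patch; the selector value is a hypothetical stand-in for __PER_CPU_SEG. Build it with "gcc -O2 -S" on x86 and compare the two loops.

#define SEG_SELECTOR 0x53    /* hypothetical stand-in for __PER_CPU_SEG */

static inline unsigned int getcpu_plain(void)
{
        unsigned int p;

        /* Plain asm with an output: GCC may CSE this or hoist it out of loops. */
        asm("lsl %1,%0" : "=r" (p) : "r" (SEG_SELECTOR));
        return p;
}

static inline unsigned int getcpu_volatile(void)
{
        unsigned int p;

        /* asm volatile: re-issued at every call site, never hoisted. */
        asm volatile("lsl %1,%0" : "=r" (p) : "r" (SEG_SELECTOR));
        return p;
}

unsigned int sum_plain(int n)
{
        unsigned int sum = 0;

        while (n--)
                sum += getcpu_plain();          /* LSL typically emitted once, before the loop */
        return sum;
}

unsigned int sum_volatile(int n)
{
        unsigned int sum = 0;

        while (n--)
                sum += getcpu_volatile();       /* LSL emitted on every iteration */
        return sum;
}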
...
@@ -66,3 +66,4 @@ targets += capflags.c
 $(obj)/capflags.c: $(cpufeature) $(src)/mkcapflags.sh FORCE
         $(call if_changed,mkcapflags)
 endif
+clean-files += capflags.c
@@ -28,7 +28,7 @@ function dump_array()
         # If the /* comment */ starts with a quote string, grab that.
         VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
         [ -z "$VALUE" ] && VALUE="\"$NAME\""
-        [ "$VALUE" == '""' ] && continue
+        [ "$VALUE" = '""' ] && continue
         # Name is uppercase, VALUE is all lowercase
         VALUE="$(echo "$VALUE" | tr A-Z a-z)"
...
@@ -438,20 +438,20 @@ static unsigned long __init init_range_memory_mapping(
 static unsigned long __init get_new_step_size(unsigned long step_size)
 {
         /*
-         * Explain why we shift by 5 and why we don't have to worry about
-         * 'step_size << 5' overflowing:
-         *
-         * initial mapped size is PMD_SIZE (2M).
+         * Initial mapped size is PMD_SIZE (2M).
          * We can not set step_size to be PUD_SIZE (1G) yet.
          * In worse case, when we cross the 1G boundary, and
          * PG_LEVEL_2M is not set, we will need 1+1+512 pages (2M + 8k)
-         * to map 1G range with PTE. Use 5 as shift for now.
+         * to map 1G range with PTE. Hence we use one less than the
+         * difference of page table level shifts.
          *
-         * Don't need to worry about overflow, on 32bit, when step_size
-         * is 0, round_down() returns 0 for start, and that turns it
-         * into 0x100000000ULL.
+         * Don't need to worry about overflow in the top-down case, on 32bit,
+         * when step_size is 0, round_down() returns 0 for start, and that
+         * turns it into 0x100000000ULL.
+         * In the bottom-up case, round_up(x, 0) returns 0 though too, which
+         * needs to be taken into consideration by the code below.
          */
-        return step_size << 5;
+        return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);
 }
 /**
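A quick sanity check of the new shift: with 4K pages, PMD_SHIFT - PAGE_SHIFT - 1 = 21 - 12 - 1 = 8, so each adjustment now multiplies step_size by 256 instead of the old 32, staying one power of two below the 512 page-table pages a freshly mapped step can supply. A small userspace sketch of that arithmetic follows; the constants are assumed x86 defaults, not taken from the patch.

#include <stdio.h>

#define PAGE_SHIFT  12
#define PMD_SHIFT   21                  /* 4K pages, 512 PTEs per page table */
#define PMD_SIZE    (1ULL << PMD_SHIFT)

static unsigned long long get_new_step_size(unsigned long long step_size)
{
        return step_size << (PMD_SHIFT - PAGE_SHIFT - 1);   /* == step_size << 8 */
}

int main(void)
{
        unsigned long long step = PMD_SIZE;     /* initial step: 2M */

        for (int i = 0; i < 3; i++) {
                printf("step %d: %llu MiB\n", i, step >> 20);
                step = get_new_step_size(step);
        }
        return 0;       /* prints 2, 512 and 131072 MiB */
}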
@@ -471,7 +471,6 @@ static void __init memory_map_top_down(unsigned long map_start,
         unsigned long step_size;
         unsigned long addr;
         unsigned long mapped_ram_size = 0;
-        unsigned long new_mapped_ram_size;
         /* xen has big range in reserved near end of ram, skip it at first.*/
         addr = memblock_find_in_range(map_start, map_end, PMD_SIZE, PMD_SIZE);
@@ -496,14 +495,12 @@ static void __init memory_map_top_down(unsigned long map_start,
                                 start = map_start;
                 } else
                         start = map_start;
-                new_mapped_ram_size = init_range_memory_mapping(start,
+                mapped_ram_size += init_range_memory_mapping(start,
                                                 last_start);
                 last_start = start;
                 min_pfn_mapped = last_start >> PAGE_SHIFT;
-                /* only increase step_size after big range get mapped */
-                if (new_mapped_ram_size > mapped_ram_size)
+                if (mapped_ram_size >= step_size)
                         step_size = get_new_step_size(step_size);
-                mapped_ram_size += new_mapped_ram_size;
         }
         if (real_end < map_end)
@@ -524,7 +521,7 @@ static void __init memory_map_top_down(unsigned long map_start,
 static void __init memory_map_bottom_up(unsigned long map_start,
                                         unsigned long map_end)
 {
-        unsigned long next, new_mapped_ram_size, start;
+        unsigned long next, start;
         unsigned long mapped_ram_size = 0;
         /* step_size need to be small so pgt_buf from BRK could cover it */
         unsigned long step_size = PMD_SIZE;
@@ -539,19 +536,19 @@ static void __init memory_map_bottom_up(unsigned long map_start,
          * for page table.
          */
         while (start < map_end) {
-                if (map_end - start > step_size) {
+                if (step_size && map_end - start > step_size) {
                         next = round_up(start + 1, step_size);
                         if (next > map_end)
                                 next = map_end;
-                } else
+                } else {
                         next = map_end;
+                }
-                new_mapped_ram_size = init_range_memory_mapping(start, next);
+                mapped_ram_size += init_range_memory_mapping(start, next);
                 start = next;
-                if (new_mapped_ram_size > mapped_ram_size)
+                if (mapped_ram_size >= step_size)
                         step_size = get_new_step_size(step_size);
-                mapped_ram_size += new_mapped_ram_size;
         }
 }
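A toy, userspace-only walk of the reworked bottom-up loop, with made-up sizes and init_range_memory_mapping() replaced by plain accumulation. It only illustrates when step_size grows under the new "mapped_ram_size >= step_size" rule and how a step_size of 0 degrades to mapping the remainder in one go; none of the numbers come from the patch.

#include <stdio.h>

#define PAGE_SHIFT  12
#define PMD_SHIFT   21
#define PMD_SIZE    (1ULL << PMD_SHIFT)

/* round_up() stand-in; like the kernel helper it yields 0 when align is 0 */
static unsigned long long round_up_ull(unsigned long long x, unsigned long long align)
{
        return align ? (x + align - 1) / align * align : 0;
}

int main(void)
{
        unsigned long long map_start = 0, map_end = 4ULL << 30;   /* pretend 4G of RAM */
        unsigned long long start = map_start, next;
        unsigned long long step_size = PMD_SIZE, mapped_ram_size = 0;

        while (start < map_end) {
                if (step_size && map_end - start > step_size) {
                        next = round_up_ull(start + 1, step_size);
                        if (next > map_end)
                                next = map_end;
                } else {
                        next = map_end;
                }

                mapped_ram_size += next - start;   /* stands in for init_range_memory_mapping() */
                printf("mapped [%#llx, %#llx)\n", start, next);
                start = next;

                if (mapped_ram_size >= step_size)
                        step_size <<= PMD_SHIFT - PAGE_SHIFT - 1;
        }
        return 0;   /* three chunks: 2M, 510M, then the rest */
}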
...
@@ -41,12 +41,17 @@ void __init init_vdso_image(const struct vdso_image *image)
 struct linux_binprm;
-/* Put the vdso above the (randomized) stack with another randomized offset.
-   This way there is no hole in the middle of address space.
-   To save memory make sure it is still in the same PTE as the stack top.
-   This doesn't give that many random bits.
-
-   Only used for the 64-bit and x32 vdsos. */
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
 static unsigned long vdso_addr(unsigned long start, unsigned len)
 {
 #ifdef CONFIG_X86_32
@@ -54,22 +59,30 @@ static unsigned long vdso_addr(unsigned long start, unsigned len)
 #else
         unsigned long addr, end;
         unsigned offset;
-        end = (start + PMD_SIZE - 1) & PMD_MASK;
+        /*
+         * Round up the start address.  It can start out unaligned as a result
+         * of stack start randomization.
+         */
+        start = PAGE_ALIGN(start);
+        /* Round the lowest possible end address up to a PMD boundary. */
+        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
         if (end >= TASK_SIZE_MAX)
                 end = TASK_SIZE_MAX;
         end -= len;
-        /* This loses some more bits than a modulo, but is cheaper */
-        offset = get_random_int() & (PTRS_PER_PTE - 1);
-        addr = start + (offset << PAGE_SHIFT);
-        if (addr >= end)
-                addr = end;
+        if (end > start) {
+                offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+                addr = start + (offset << PAGE_SHIFT);
+        } else {
+                addr = start;
+        }
         /*
-         * page-align it here so that get_unmapped_area doesn't
-         * align it wrongfully again to the next page. addr can come in 4K
-         * unaligned here as a result of stack start randomization.
+         * Forcibly align the final address in case we have a hardware
+         * issue that requires alignment for performance reasons.
          */
-        addr = PAGE_ALIGN(addr);
         addr = align_vdso_addr(addr);
         return addr;
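To see the effect of the new selection, here is the patched placement logic transplanted into a standalone LP64 userspace sketch; PMD_SIZE, TASK_SIZE_MAX and rand() are stand-ins for the kernel definitions and get_random_int(), and align_vdso_addr() is left out. Every page-aligned slot between start and end is now equally likely, whereas the old mask-and-clamp approach piled much of the probability mass onto end.

#include <stdio.h>
#include <stdlib.h>

#define PAGE_SHIFT      12
#define PAGE_SIZE       (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x)   (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PMD_SIZE        (1UL << 21)
#define PMD_MASK        (~(PMD_SIZE - 1))
#define TASK_SIZE_MAX   0x00007ffffffff000UL    /* assumed x86_64 value */

static unsigned long vdso_addr(unsigned long start, unsigned len)
{
        unsigned long addr, end;
        unsigned offset;

        start = PAGE_ALIGN(start);                      /* stack top may be 4K-unaligned */
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;  /* lowest possible end, PMD-rounded */
        if (end >= TASK_SIZE_MAX)
                end = TASK_SIZE_MAX;
        end -= len;

        if (end > start) {
                /* rand() stands in for get_random_int(); each slot equally likely */
                offset = rand() % (((end - start) >> PAGE_SHIFT) + 1);
                addr = start + ((unsigned long)offset << PAGE_SHIFT);
        } else {
                addr = start;
        }
        return addr;    /* the kernel additionally applies align_vdso_addr() */
}

int main(void)
{
        for (int i = 0; i < 5; i++)
                printf("%#lx\n", vdso_addr(0x7ffd12345678UL, 2 * PAGE_SIZE));
        return 0;
}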
...