Commit 2cf30826 authored by Linus Torvalds

Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull x86 fixes from Ingo Molnar:
 "Fixes all around the map: W+X kernel mapping fix, WCHAN fixes, two
  build failure fixes for corner case configs, x32 header fix and a
  speling fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/headers/uapi: Fix __BITS_PER_LONG value for x32 builds
  x86/mm: Set NX on gap between __ex_table and rodata
  x86/kexec: Fix kexec crash in syscall kexec_file_load()
  x86/process: Unify 32bit and 64bit implementations of get_wchan()
  x86/process: Add proper bound checks in 64bit get_wchan()
  x86, efi, kasan: Fix build failure on !KASAN && KMEMCHECK=y kernels
  x86/hyperv: Fix the build in the !CONFIG_KEXEC_CORE case
  x86/cpufeatures: Correct spelling of the HWP_NOTIFY flag
parents 37cc7ab1 f4b4aae1
@@ -193,7 +193,7 @@
 #define X86_FEATURE_HW_PSTATE		( 7*32+ 8) /* AMD HW-PState */
 #define X86_FEATURE_PROC_FEEDBACK	( 7*32+ 9) /* AMD ProcFeedbackInterface */
 #define X86_FEATURE_HWP			( 7*32+ 10) /* "hwp" Intel HWP */
-#define X86_FEATURE_HWP_NOITFY		( 7*32+ 11) /* Intel HWP_NOTIFY */
+#define X86_FEATURE_HWP_NOTIFY		( 7*32+ 11) /* Intel HWP_NOTIFY */
 #define X86_FEATURE_HWP_ACT_WINDOW	( 7*32+ 12) /* Intel HWP_ACT_WINDOW */
 #define X86_FEATURE_HWP_EPP		( 7*32+13) /* Intel HWP_EPP */
 #define X86_FEATURE_HWP_PKG_REQ		( 7*32+14) /* Intel HWP_PKG_REQ */
...
 #ifndef __ASM_X86_BITSPERLONG_H
 #define __ASM_X86_BITSPERLONG_H

-#ifdef __x86_64__
+#if defined(__x86_64__) && !defined(__ILP32__)
 # define __BITS_PER_LONG 64
 #else
 # define __BITS_PER_LONG 32
...
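Context for the x32 fix above: x32 binaries are built with __x86_64__ defined but use 32-bit longs and pointers (ILP32), so the header must also exclude __ILP32__. Below is a small stand-alone check, not part of the patch, that prints the predefined macros the uapi header relies on; compiled with -mx32 it reports a 4-byte long even though __x86_64__ is defined.

/* Illustrative only: show why __x86_64__ alone is not enough to pick
 * __BITS_PER_LONG. Build with -m64, -m32 and (if available) -mx32.
 */
#include <stdio.h>

int main(void)
{
#ifdef __x86_64__
	puts("__x86_64__ is defined");
#else
	puts("__x86_64__ is not defined");
#endif
#ifdef __ILP32__
	puts("__ILP32__ is defined (x32: 32-bit long/pointer on a 64-bit ISA)");
#endif
	/* __BITS_PER_LONG must match sizeof(long) * 8 for the ABI in use. */
	printf("sizeof(long) = %zu, so BITS_PER_LONG should be %zu\n",
	       sizeof(long), sizeof(long) * 8);
	return 0;
}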
@@ -34,11 +34,10 @@
 struct ms_hyperv_info ms_hyperv;
 EXPORT_SYMBOL_GPL(ms_hyperv);

-static void (*hv_kexec_handler)(void);
-static void (*hv_crash_handler)(struct pt_regs *regs);
-
 #if IS_ENABLED(CONFIG_HYPERV)
 static void (*vmbus_handler)(void);
+static void (*hv_kexec_handler)(void);
+static void (*hv_crash_handler)(struct pt_regs *regs);

 void hyperv_vector_handler(struct pt_regs *regs)
 {
@@ -96,8 +95,8 @@ void hv_remove_crash_handler(void)
 	hv_crash_handler = NULL;
 }
 EXPORT_SYMBOL_GPL(hv_remove_crash_handler);
-#endif

+#ifdef CONFIG_KEXEC_CORE
 static void hv_machine_shutdown(void)
 {
 	if (kexec_in_progress && hv_kexec_handler)
@@ -111,7 +110,8 @@ static void hv_machine_crash_shutdown(struct pt_regs *regs)
 		hv_crash_handler(regs);
 	native_machine_crash_shutdown(regs);
 }
+#endif /* CONFIG_KEXEC_CORE */
+#endif /* CONFIG_HYPERV */

 static uint32_t __init ms_hyperv_platform(void)
 {
@@ -186,8 +186,10 @@ static void __init ms_hyperv_init_platform(void)
 	no_timer_check = 1;
 #endif

+#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
 	machine_ops.shutdown = hv_machine_shutdown;
 	machine_ops.crash_shutdown = hv_machine_crash_shutdown;
+#endif
 	mark_tsc_unstable("running on Hyper-V");
 }
...
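The Hyper-V fix leans on the difference between IS_ENABLED() (usable in both C and preprocessor expressions, true for =y and =m) and plain defined(). The stand-alone sketch below re-creates the IS_ENABLED() trick along the lines of include/linux/kconfig.h so the combined test used above can be tried outside the kernel; the CONFIG_* values here are stand-ins, not real kernel configuration.

/* Hypothetical demo of the kernel's IS_ENABLED() machinery (modelled
 * on include/linux/kconfig.h). CONFIG_HYPERV pretends to be =y,
 * CONFIG_KEXEC_CORE is deliberately left unset.
 */
#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define config_enabled(cfg)		_config_enabled(cfg)
#define _config_enabled(value)		__config_enabled(__ARG_PLACEHOLDER_##value)
#define __config_enabled(arg1_or_junk)	___config_enabled(arg1_or_junk 1, 0)
#define ___config_enabled(__ignored, val, ...) val
#define IS_ENABLED(option) (config_enabled(option) || config_enabled(option##_MODULE))

#define CONFIG_HYPERV 1	/* stand-in for "=y"; comment out to simulate "=n" */

int main(void)
{
	printf("IS_ENABLED(CONFIG_HYPERV)     = %d\n", IS_ENABLED(CONFIG_HYPERV));
	printf("IS_ENABLED(CONFIG_KEXEC_CORE) = %d\n", IS_ENABLED(CONFIG_KEXEC_CORE));
#if IS_ENABLED(CONFIG_HYPERV) && defined(CONFIG_KEXEC_CORE)
	puts("kexec shutdown hooks would be wired up");
#else
	puts("kexec shutdown hooks compiled out");
#endif
	return 0;
}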
@@ -37,7 +37,7 @@ void init_scattered_cpuid_features(struct cpuinfo_x86 *c)
 		{ X86_FEATURE_PLN,		CR_EAX, 4, 0x00000006, 0 },
 		{ X86_FEATURE_PTS,		CR_EAX, 6, 0x00000006, 0 },
 		{ X86_FEATURE_HWP,		CR_EAX, 7, 0x00000006, 0 },
-		{ X86_FEATURE_HWP_NOITFY,	CR_EAX, 8, 0x00000006, 0 },
+		{ X86_FEATURE_HWP_NOTIFY,	CR_EAX, 8, 0x00000006, 0 },
 		{ X86_FEATURE_HWP_ACT_WINDOW,	CR_EAX, 9, 0x00000006, 0 },
 		{ X86_FEATURE_HWP_EPP,		CR_EAX,10, 0x00000006, 0 },
 		{ X86_FEATURE_HWP_PKG_REQ,	CR_EAX,11, 0x00000006, 0 },
...
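The scattered-CPUID table above maps bits 7-11 of EAX from CPUID leaf 0x06 to the HWP feature flags. As a quick way to look at those raw bits from user space, here is a small probe using the compiler-provided <cpuid.h> helper; it is an illustration only, not part of the patch.

/* Read CPUID leaf 0x06, EAX, and report the HWP-related bits that the
 * scattered table above turns into X86_FEATURE_HWP* flags. x86 only.
 */
#include <stdio.h>
#include <cpuid.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x00000006, &eax, &ebx, &ecx, &edx)) {
		puts("CPUID leaf 0x06 not available");
		return 1;
	}
	printf("HWP            (EAX bit  7): %u\n", (eax >> 7) & 1);
	printf("HWP_NOTIFY     (EAX bit  8): %u\n", (eax >> 8) & 1);
	printf("HWP_ACT_WINDOW (EAX bit  9): %u\n", (eax >> 9) & 1);
	printf("HWP_EPP        (EAX bit 10): %u\n", (eax >> 10) & 1);
	printf("HWP_PKG_REQ    (EAX bit 11): %u\n", (eax >> 11) & 1);
	return 0;
}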
@@ -185,10 +185,9 @@ void native_machine_crash_shutdown(struct pt_regs *regs)
 }

 #ifdef CONFIG_KEXEC_FILE
-static int get_nr_ram_ranges_callback(unsigned long start_pfn,
-				unsigned long nr_pfn, void *arg)
+static int get_nr_ram_ranges_callback(u64 start, u64 end, void *arg)
 {
-	int *nr_ranges = arg;
+	unsigned int *nr_ranges = arg;

 	(*nr_ranges)++;
 	return 0;
@@ -214,7 +213,7 @@ static void fill_up_crash_elf_data(struct crash_elf_data *ced,
 	ced->image = image;

-	walk_system_ram_range(0, -1, &nr_ranges,
+	walk_system_ram_res(0, -1, &nr_ranges,
 			get_nr_ram_ranges_callback);

 	ced->max_nr_ranges = nr_ranges;
...
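For intuition about what the fixed callback counts: the switch to walk_system_ram_res() means the loop visits "System RAM" resource ranges, the same ranges that /proc/iomem lists. Below is a rough user-space analogue of that counting, purely for illustration; it mirrors the bookkeeping of get_nr_ram_ranges_callback() but is not kernel code.

/* Count top-level "System RAM" ranges as listed in /proc/iomem.
 * Addresses are zeroed for non-root readers, but the count still works.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/iomem", "r");
	char line[256];
	unsigned int nr_ranges = 0;

	if (!f) {
		perror("/proc/iomem");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		/* Top-level entries are unindented; children start with spaces. */
		if (line[0] != ' ' && strstr(line, "System RAM"))
			nr_ranges++;
	}
	fclose(f);
	printf("System RAM ranges: %u\n", nr_ranges);
	return 0;
}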
@@ -506,3 +506,58 @@ unsigned long arch_randomize_brk(struct mm_struct *mm)
 	return randomize_range(mm->brk, range_end, 0) ? : mm->brk;
 }
+
+/*
+ * Called from fs/proc with a reference on @p to find the function
+ * which called into schedule(). This needs to be done carefully
+ * because the task might wake up and we might look at a stack
+ * changing under us.
+ */
+unsigned long get_wchan(struct task_struct *p)
+{
+	unsigned long start, bottom, top, sp, fp, ip;
+	int count = 0;
+
+	if (!p || p == current || p->state == TASK_RUNNING)
+		return 0;
+
+	start = (unsigned long)task_stack_page(p);
+	if (!start)
+		return 0;
+
+	/*
+	 * Layout of the stack page:
+	 *
+	 * ----------- topmax = start + THREAD_SIZE - sizeof(unsigned long)
+	 * PADDING
+	 * ----------- top = topmax - TOP_OF_KERNEL_STACK_PADDING
+	 * stack
+	 * ----------- bottom = start + sizeof(thread_info)
+	 * thread_info
+	 * ----------- start
+	 *
+	 * The tasks stack pointer points at the location where the
+	 * framepointer is stored. The data on the stack is:
+	 * ... IP FP ... IP FP
+	 *
+	 * We need to read FP and IP, so we need to adjust the upper
+	 * bound by another unsigned long.
+	 */
+	top = start + THREAD_SIZE - TOP_OF_KERNEL_STACK_PADDING;
+	top -= 2 * sizeof(unsigned long);
+	bottom = start + sizeof(struct thread_info);
+
+	sp = READ_ONCE(p->thread.sp);
+	if (sp < bottom || sp > top)
+		return 0;
+
+	fp = READ_ONCE(*(unsigned long *)sp);
+	do {
+		if (fp < bottom || fp > top)
+			return 0;
+		ip = READ_ONCE(*(unsigned long *)(fp + sizeof(unsigned long)));
+		if (!in_sched_functions(ip))
+			return ip;
+		fp = READ_ONCE(*(unsigned long *)fp);
+	} while (count++ < 16 && p->state != TASK_RUNNING);
+
+	return 0;
+}
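The loop just added is a classic frame-pointer walk: at each frame, the caller's frame pointer sits at *fp and the return address at *(fp + sizeof(long)), and the walk stops as soon as either pointer leaves the known stack bounds. As a rough user-space illustration of that "... IP FP ... IP FP" layout (not kernel code; it assumes x86-64 and a build with -O0 -fno-omit-frame-pointer so frame pointers are actually kept), a sketch:

/* Walk our own frame-pointer chain, get_wchan()-style, printing each
 * saved return address. The walk is bounded by the address of a local
 * in main() so we never chase pointers outside frames we control.
 */
#include <stdio.h>
#include <stdint.h>

static uintptr_t stack_top;	/* set in main() before the walk */

static void __attribute__((noinline)) show_frame_walk(void)
{
	uintptr_t fp = (uintptr_t)__builtin_frame_address(0);
	int count = 0;

	while (count++ < 16) {
		uintptr_t ip = *(uintptr_t *)(fp + sizeof(uintptr_t)); /* saved return address */
		printf("frame %2d: fp=%#lx  return ip=%#lx\n",
		       count, (unsigned long)fp, (unsigned long)ip);

		uintptr_t next = *(uintptr_t *)fp; /* caller's saved frame pointer */
		if (next <= fp || next >= stack_top)
			break;	/* left the region we know is valid */
		fp = next;
	}
}

static void __attribute__((noinline)) callee(void)
{
	show_frame_walk();
}

int main(void)
{
	int anchor;

	stack_top = (uintptr_t)&anchor;
	callee();
	return 0;
}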
@@ -324,31 +324,3 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)

 	return prev_p;
 }
-
-#define top_esp		(THREAD_SIZE - sizeof(unsigned long))
-#define top_ebp		(THREAD_SIZE - 2*sizeof(unsigned long))
-
-unsigned long get_wchan(struct task_struct *p)
-{
-	unsigned long bp, sp, ip;
-	unsigned long stack_page;
-	int count = 0;
-	if (!p || p == current || p->state == TASK_RUNNING)
-		return 0;
-	stack_page = (unsigned long)task_stack_page(p);
-	sp = p->thread.sp;
-	if (!stack_page || sp < stack_page || sp > top_esp+stack_page)
-		return 0;
-	/* include/asm-i386/system.h:switch_to() pushes bp last. */
-	bp = *(unsigned long *) sp;
-	do {
-		if (bp < stack_page || bp > top_ebp+stack_page)
-			return 0;
-		ip = *(unsigned long *) (bp+4);
-		if (!in_sched_functions(ip))
-			return ip;
-		bp = *(unsigned long *) bp;
-	} while (count++ < 16);
-	return 0;
-}
@@ -499,30 +499,6 @@ void set_personality_ia32(bool x32)
 }
 EXPORT_SYMBOL_GPL(set_personality_ia32);

-unsigned long get_wchan(struct task_struct *p)
-{
-	unsigned long stack;
-	u64 fp, ip;
-	int count = 0;
-
-	if (!p || p == current || p->state == TASK_RUNNING)
-		return 0;
-	stack = (unsigned long)task_stack_page(p);
-	if (p->thread.sp < stack || p->thread.sp >= stack+THREAD_SIZE)
-		return 0;
-	fp = *(u64 *)(p->thread.sp);
-	do {
-		if (fp < (unsigned long)stack ||
-		    fp >= (unsigned long)stack+THREAD_SIZE)
-			return 0;
-		ip = *(u64 *)(fp+8);
-		if (!in_sched_functions(ip))
-			return ip;
-		fp = *(u64 *)fp;
-	} while (count++ < 16);
-	return 0;
-}
-
 long do_arch_prctl(struct task_struct *task, int code, unsigned long addr)
 {
 	int ret = 0;
...
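Both removed walkers, and the unified replacement above, exist to back the /proc/<pid>/wchan file (the new function's comment notes it is called from fs/proc). A small user-space reader of that consumer, for illustration only:

/* Print the wait channel of a process, i.e. the value get_wchan()
 * feeds into /proc/<pid>/wchan. Usage: ./wchan <pid>. A running task
 * typically reads back as "0".
 */
#include <stdio.h>
#include <stdlib.h>

int main(int argc, char **argv)
{
	char path[64], buf[128];
	FILE *f;
	size_t n;

	if (argc != 2) {
		fprintf(stderr, "usage: %s <pid>\n", argv[0]);
		return 1;
	}
	snprintf(path, sizeof(path), "/proc/%ld/wchan", strtol(argv[1], NULL, 10));
	f = fopen(path, "r");
	if (!f) {
		perror(path);
		return 1;
	}
	n = fread(buf, 1, sizeof(buf) - 1, f);
	buf[n] = '\0';
	fclose(f);
	printf("pid %s is waiting in: %s\n", argv[1], buf);
	return 0;
}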
...@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void) ...@@ -1132,7 +1132,7 @@ void mark_rodata_ro(void)
* has been zapped already via cleanup_highmem(). * has been zapped already via cleanup_highmem().
*/ */
all_end = roundup((unsigned long)_brk_end, PMD_SIZE); all_end = roundup((unsigned long)_brk_end, PMD_SIZE);
set_memory_nx(rodata_start, (all_end - rodata_start) >> PAGE_SHIFT); set_memory_nx(text_end, (all_end - text_end) >> PAGE_SHIFT);
rodata_test(); rodata_test();
......
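The mark_rodata_ro() change widens the NX range so the gap between the end of the text/exception-table area and the start of rodata does not remain writable and executable, which is what the "W+X kernel mapping fix" in the pull message refers to. As a loosely related, purely illustrative user-space exercise, the same W^X idea can be checked against a process's own mappings:

/* Flag any simultaneously writable and executable mappings of the
 * current process - a user-space cousin of kernel W^X checking.
 */
#include <stdio.h>
#include <string.h>

int main(void)
{
	FILE *f = fopen("/proc/self/maps", "r");
	char line[512];
	int wx = 0;

	if (!f) {
		perror("/proc/self/maps");
		return 1;
	}
	while (fgets(line, sizeof(line), f)) {
		char perms[8] = "";

		/* Line format: start-end perms offset dev inode path */
		if (sscanf(line, "%*s %7s", perms) == 1 &&
		    strchr(perms, 'w') && strchr(perms, 'x')) {
			wx++;
			printf("W+X mapping: %s", line);
		}
	}
	fclose(f);
	printf("%d writable+executable mapping(s) found\n", wx);
	return 0;
}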