Commit 62b3f981 authored by Ingo Molnar

Merge branch 'x86/debug' into x86/cpu

parents af2e1f27 b05f78f5
@@ -1882,6 +1882,12 @@ and is between 256 and 4096 characters. It is defined in the file
 	shapers=	[NET]
 			Maximal number of shapers.
 
+	show_msr=	[x86] show boot-time MSR settings
+			Format: { <integer> }
+			Show boot-time (BIOS-initialized) MSR settings.
+			The parameter means the number of CPUs to show,
+			for example 1 means boot CPU only.
+
 	sim710=		[SCSI,HW]
 			See header of drivers/scsi/sim710.c.
...
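A usage note, not part of the commit text: booting with, say, show_msr=1 on the kernel command line makes the boot CPU dump its BIOS-initialized MSRs during print_cpu_info(); show_msr=2 covers the first two CPUs, and so on. Each readable register comes out of print_cpu_msr() (added below) as one KERN_INFO line of the form:

	 MSR<index: 8 hex digits>: <value: 16 hex digits>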
@@ -430,6 +430,49 @@ static __init int setup_noclflush(char *arg)
 }
 __setup("noclflush", setup_noclflush);
 
+struct msr_range {
+	unsigned	min;
+	unsigned	max;
+};
+
+static struct msr_range msr_range_array[] __cpuinitdata = {
+	{ 0x00000000, 0x00000418},
+	{ 0xc0000000, 0xc000040b},
+	{ 0xc0010000, 0xc0010142},
+	{ 0xc0011000, 0xc001103b},
+};
+
+static void __cpuinit print_cpu_msr(void)
+{
+	unsigned index;
+	u64 val;
+	int i;
+	unsigned index_min, index_max;
+
+	for (i = 0; i < ARRAY_SIZE(msr_range_array); i++) {
+		index_min = msr_range_array[i].min;
+		index_max = msr_range_array[i].max;
+		for (index = index_min; index < index_max; index++) {
+			if (rdmsrl_amd_safe(index, &val))
+				continue;
+			printk(KERN_INFO " MSR%08x: %016llx\n", index, val);
+		}
+	}
+}
+
+static int show_msr __cpuinitdata;
+
+static __init int setup_show_msr(char *arg)
+{
+	int num;
+
+	get_option(&arg, &num);
+
+	if (num > 0)
+		show_msr = num;
+	return 1;
+}
+__setup("show_msr=", setup_show_msr);
+
 void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 {
 	if (c->x86_model_id[0])
@@ -439,6 +482,14 @@ void __cpuinit print_cpu_info(struct cpuinfo_x86 *c)
 		printk(KERN_CONT " stepping %02x\n", c->x86_mask);
 	else
 		printk(KERN_CONT "\n");
+
+#ifdef CONFIG_SMP
+	if (c->cpu_index < show_msr)
+		print_cpu_msr();
+#else
+	if (show_msr)
+		print_cpu_msr();
+#endif
 }
 
 static __init int setup_disablecpuid(char *arg)
...
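For cross-checking the boot-time dump at runtime, the msr character device exposes the same registers to userspace. A minimal sketch, assuming CONFIG_X86_MSR is enabled and /dev/cpu/0/msr exists; the index 0xc0010015 (AMD HWCR) is picked purely as an illustrative value inside the ranges scanned above:

	/* Hedged userspace sketch, not part of this commit: read one MSR
	 * through the msr driver. The pread() offset selects the MSR index. */
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <unistd.h>

	int main(void)
	{
		uint64_t val;
		int fd = open("/dev/cpu/0/msr", O_RDONLY);

		if (fd < 0)
			return 1;
		if (pread(fd, &val, sizeof(val), 0xc0010015) == (ssize_t)sizeof(val))
			printf("MSR%08x: %016llx\n", 0xc0010015,
			       (unsigned long long)val);
		close(fd);
		return 0;
	}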
@@ -275,9 +275,9 @@ ENTRY(native_usergs_sysret64)
 ENTRY(ret_from_fork)
 	CFI_DEFAULT_STACK
 	push kernel_eflags(%rip)
-	CFI_ADJUST_CFA_OFFSET 4
+	CFI_ADJUST_CFA_OFFSET 8
 	popf				# reset kernel eflags
-	CFI_ADJUST_CFA_OFFSET -4
+	CFI_ADJUST_CFA_OFFSET -8
 	call schedule_tail
 	GET_THREAD_INFO(%rcx)
 	testl $(_TIF_SYSCALL_TRACE|_TIF_SYSCALL_AUDIT),TI_flags(%rcx)
...
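The CFI fix above corrects a 32-bit leftover: in 64-bit mode, push and popf move %rsp by 8 bytes, not 4, so the DWARF CFA annotations around the kernel_eflags reload must be adjusted by 8. With the old value, an unwinder walking through ret_from_fork computed a canonical frame address that was off by 4 bytes.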
@@ -330,6 +330,7 @@ struct pv_cpu_ops pv_cpu_ops = {
 #endif
 	.wbinvd = native_wbinvd,
 	.read_msr = native_read_msr_safe,
+	.read_msr_amd = native_read_msr_amd_safe,
 	.write_msr = native_write_msr_safe,
 	.read_tsc = native_read_tsc,
 	.read_pmc = native_read_pmc,
...
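Since the MSR dump dispatches through pv_cpu_ops rather than executing rdmsr directly, a paravirtualized platform can substitute its own handler for the AMD-keyed read. A minimal sketch, assuming a hypothetical guest port that simply refuses such reads (the handler name is invented for illustration):

	/* Hypothetical guest handler: report every AMD-keyed read as faulting. */
	static u64 demo_read_msr_amd(unsigned int msr, int *err)
	{
		*err = -EFAULT;
		return 0;
	}

	/* In the platform's early setup code, before secondary CPUs boot: */
	pv_cpu_ops.read_msr_amd = demo_read_msr_amd;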
@@ -162,9 +162,16 @@ void __init setup_per_cpu_areas(void)
 			printk(KERN_INFO
 			       "cpu %d has no node %d or node-local memory\n",
 				cpu, node);
+			if (ptr)
+				printk(KERN_DEBUG "per cpu data for cpu%d at %016lx\n",
+					 cpu, __pa(ptr));
 		}
-		else
+		else {
 			ptr = alloc_bootmem_pages_node(NODE_DATA(node), size);
+			if (ptr)
+				printk(KERN_DEBUG "per cpu data for cpu%d on node%d at %016lx\n",
+					 cpu, node, __pa(ptr));
+		}
 #endif
 		per_cpu_offset(cpu) = ptr - __per_cpu_start;
 		memcpy(ptr, __per_cpu_start, __per_cpu_end - __per_cpu_start);
...
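The two new KERN_DEBUG lines fire only when the allocation actually succeeded, so a boot log gains at most one line per CPU; a node-local success would read something like (address hypothetical) "per cpu data for cpu1 on node0 at 0000000007a00000". Printing __pa(ptr) rather than the virtual pointer makes the physical placement of each per-cpu area visible.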
@@ -339,9 +339,8 @@ static void
 show_trace_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		unsigned long *stack, unsigned long bp, char *log_lvl)
 {
-	printk("\nCall Trace:\n");
+	printk("Call Trace:\n");
 	dump_trace(task, regs, stack, bp, &print_trace_ops, log_lvl);
-	printk("\n");
 }
 
 void show_trace(struct task_struct *task, struct pt_regs *regs,
@@ -386,6 +385,7 @@ show_stack_log_lvl(struct task_struct *task, struct pt_regs *regs,
 		printk(" %016lx", *stack++);
 		touch_nmi_watchdog();
 	}
+	printk("\n");
 	show_trace_log_lvl(task, regs, sp, bp, log_lvl);
 }
 
@@ -443,7 +443,6 @@ void show_registers(struct pt_regs *regs)
 		printk("Stack: ");
 		show_stack_log_lvl(NULL, regs, (unsigned long *)sp,
 				regs->bp, "");
-		printk("\n");
 
 		printk(KERN_EMERG "Code: ");
...
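Taken together, the three hunks move the newline handling into one place: show_stack_log_lvl() now terminates the raw stack words itself, so show_trace_log_lvl() no longer needs a leading "\n" before "Call Trace:" (a line that would otherwise start without its log-level prefix), and the callers drop their compensating printk("\n") calls.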
@@ -63,6 +63,22 @@ static inline unsigned long long native_read_msr_safe(unsigned int msr,
 	return EAX_EDX_VAL(val, low, high);
 }
 
+static inline unsigned long long native_read_msr_amd_safe(unsigned int msr,
+						      int *err)
+{
+	DECLARE_ARGS(val, low, high);
+
+	asm volatile("2: rdmsr ; xor %0,%0\n"
+		     "1:\n\t"
+		     ".section .fixup,\"ax\"\n\t"
+		     "3:  mov %3,%0 ; jmp 1b\n\t"
+		     ".previous\n\t"
+		     _ASM_EXTABLE(2b, 3b)
+		     : "=r" (*err), EAX_EDX_RET(val, low, high)
+		     : "c" (msr), "D" (0x9c5a203a), "i" (-EFAULT));
+	return EAX_EDX_VAL(val, low, high);
+}
+
 static inline void native_write_msr(unsigned int msr,
 				    unsigned low, unsigned high)
 {
@@ -158,6 +174,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = native_read_msr_safe(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = native_read_msr_amd_safe(msr, &err);
+	return err;
+}
 
 #define rdtscl(low)						\
 	((low) = (u32)native_read_tsc())
...
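About the magic constant: native_read_msr_amd_safe() preloads %edi with 0x9c5a203a before rdmsr, which per this commit's usage acts as an AMD passcode unlocking reads of certain AMD-only MSRs; the plain *_safe variant leaves %edi unset, so such reads would simply fault into the fixup path. A hedged call-site sketch for the new wrapper (the MSR index is an illustrative assumption, inside the 0xc0010000 range scanned earlier):

	u64 val;

	/* 0xc0010015 is AMD HWCR; rdmsrl_amd_safe() returns non-zero on fault */
	if (!rdmsrl_amd_safe(0xc0010015, &val))
		printk(KERN_INFO "HWCR: %016llx\n", val);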
@@ -137,6 +137,7 @@ struct pv_cpu_ops {
 	/* MSR, PMC and TSR operations.
 	   err = 0/-EFAULT.  wrmsr returns 0/-EFAULT. */
+	u64 (*read_msr_amd)(unsigned int msr, int *err);
 	u64 (*read_msr)(unsigned int msr, int *err);
 	int (*write_msr)(unsigned int msr, unsigned low, unsigned high);
@@ -726,6 +727,10 @@ static inline u64 paravirt_read_msr(unsigned msr, int *err)
 {
 	return PVOP_CALL2(u64, pv_cpu_ops.read_msr, msr, err);
 }
+static inline u64 paravirt_read_msr_amd(unsigned msr, int *err)
+{
+	return PVOP_CALL2(u64, pv_cpu_ops.read_msr_amd, msr, err);
+}
 static inline int paravirt_write_msr(unsigned msr, unsigned low, unsigned high)
 {
 	return PVOP_CALL3(int, pv_cpu_ops.write_msr, msr, low, high);
@@ -771,6 +776,13 @@ static inline int rdmsrl_safe(unsigned msr, unsigned long long *p)
 	*p = paravirt_read_msr(msr, &err);
 	return err;
 }
+static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
+{
+	int err;
+
+	*p = paravirt_read_msr_amd(msr, &err);
+	return err;
+}
 
 static inline u64 paravirt_read_tsc(void)
 {
...
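Note that rdmsrl_amd_safe() is deliberately defined twice with the same signature: the msr.h copy calls native_read_msr_amd_safe() directly for !CONFIG_PARAVIRT builds, while this copy routes through PVOP_CALL2, so a caller such as print_cpu_msr() compiles unchanged in either configuration.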