Commit 7421a10d authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  x86: Use .cfi_sections for assembly code
  x86-64: Reduce SMP locks table size
  x86, asm: Introduce and use percpu_inc()
parents 752f114f 9e565292
...@@ -95,8 +95,9 @@ sp-$(CONFIG_X86_64) := rsp ...@@ -95,8 +95,9 @@ sp-$(CONFIG_X86_64) := rsp
cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1) cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
# is .cfi_signal_frame supported too? # is .cfi_signal_frame supported too?
cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1) cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections)
LDFLAGS := -m elf_$(UTS_MACHINE) LDFLAGS := -m elf_$(UTS_MACHINE)
......
...@@ -6,8 +6,8 @@ ...@@ -6,8 +6,8 @@
.macro LOCK_PREFIX .macro LOCK_PREFIX
1: lock 1: lock
.section .smp_locks,"a" .section .smp_locks,"a"
_ASM_ALIGN .balign 4
_ASM_PTR 1b .long 1b - .
.previous .previous
.endm .endm
#else #else
......
...@@ -30,8 +30,8 @@ ...@@ -30,8 +30,8 @@
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
#define LOCK_PREFIX \ #define LOCK_PREFIX \
".section .smp_locks,\"a\"\n" \ ".section .smp_locks,\"a\"\n" \
_ASM_ALIGN "\n" \ ".balign 4\n" \
_ASM_PTR "661f\n" /* address */ \ ".long 661f - .\n" /* offset */ \
".previous\n" \ ".previous\n" \
"661:\n\tlock; " "661:\n\tlock; "
......
...@@ -34,6 +34,18 @@ ...@@ -34,6 +34,18 @@
#define CFI_SIGNAL_FRAME #define CFI_SIGNAL_FRAME
#endif #endif
#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
/*
 * Emit CFI data in .debug_frame sections, not .eh_frame sections.
 * The latter we currently just discard since we don't do DWARF
 * unwinding at runtime. So only the offline DWARF information is
 * useful to anyone. Note we should not use this directive if this
 * file is used in the vDSO assembly, or if vmlinux.lds.S gets
 * changed so it doesn't discard .eh_frame.
 *
 * CONFIG_AS_CFI_SECTIONS is defined (-DCONFIG_AS_CFI_SECTIONS=1) by
 * the arch Makefile's as-instr probe only when the assembler accepts
 * the .cfi_sections directive, so older binutils still build.
 */
.cfi_sections .debug_frame
#endif
#else #else
/* /*
......
...@@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat); ...@@ -35,7 +35,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
#define __ARCH_IRQ_STAT #define __ARCH_IRQ_STAT
#define inc_irq_stat(member) percpu_add(irq_stat.member, 1) #define inc_irq_stat(member) percpu_inc(irq_stat.member)
#define local_softirq_pending() percpu_read(irq_stat.__softirq_pending) #define local_softirq_pending() percpu_read(irq_stat.__softirq_pending)
......
...@@ -190,6 +190,29 @@ do { \ ...@@ -190,6 +190,29 @@ do { \
pfo_ret__; \ pfo_ret__; \
}) })
/*
 * percpu_unary_op(op, var) - apply a single-operand x86 instruction
 * (e.g. "inc") to the per-cpu variable @var in place.
 *
 * The operand-size suffix (b/w/l/q) is chosen at compile time from
 * sizeof(var); the "+m" constraint makes the variable a read-write
 * memory operand. __percpu_arg() supplies the per-cpu segment
 * addressing for operand 0 (defined elsewhere in this header —
 * presumably %gs/%fs-relative; confirm against __percpu_arg).
 * An unsupported size falls through to __bad_percpu_size(), which
 * NOTE(review) appears to be a deliberately unresolved symbol so the
 * error surfaces at build time — TODO confirm its declaration.
 */
#define percpu_unary_op(op, var) \
({ \
switch (sizeof(var)) { \
case 1: \
asm(op "b "__percpu_arg(0) \
: "+m" (var)); \
break; \
case 2: \
asm(op "w "__percpu_arg(0) \
: "+m" (var)); \
break; \
case 4: \
asm(op "l "__percpu_arg(0) \
: "+m" (var)); \
break; \
case 8: \
asm(op "q "__percpu_arg(0) \
: "+m" (var)); \
break; \
default: __bad_percpu_size(); \
} \
})
/* /*
* percpu_read() makes gcc load the percpu variable every time it is * percpu_read() makes gcc load the percpu variable every time it is
* accessed while percpu_read_stable() allows the value to be cached. * accessed while percpu_read_stable() allows the value to be cached.
...@@ -207,6 +230,7 @@ do { \ ...@@ -207,6 +230,7 @@ do { \
#define percpu_and(var, val) percpu_to_op("and", var, val) #define percpu_and(var, val) percpu_to_op("and", var, val)
#define percpu_or(var, val) percpu_to_op("or", var, val) #define percpu_or(var, val) percpu_to_op("or", var, val)
#define percpu_xor(var, val) percpu_to_op("xor", var, val) #define percpu_xor(var, val) percpu_to_op("xor", var, val)
#define percpu_inc(var) percpu_unary_op("inc", var)
#define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define __this_cpu_read_1(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
#define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp)) #define __this_cpu_read_2(pcp) percpu_from_op("mov", (pcp), "m"(pcp))
......
...@@ -194,7 +194,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len) ...@@ -194,7 +194,7 @@ static void __init_or_module add_nops(void *insns, unsigned int len)
} }
extern struct alt_instr __alt_instructions[], __alt_instructions_end[]; extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
extern u8 *__smp_locks[], *__smp_locks_end[]; extern s32 __smp_locks[], __smp_locks_end[];
static void *text_poke_early(void *addr, const void *opcode, size_t len); static void *text_poke_early(void *addr, const void *opcode, size_t len);
/* Replace instructions with better alternatives for this CPU type. /* Replace instructions with better alternatives for this CPU type.
...@@ -235,37 +235,39 @@ void __init_or_module apply_alternatives(struct alt_instr *start, ...@@ -235,37 +235,39 @@ void __init_or_module apply_alternatives(struct alt_instr *start,
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
static void alternatives_smp_lock(u8 **start, u8 **end, u8 *text, u8 *text_end) static void alternatives_smp_lock(const s32 *start, const s32 *end,
u8 *text, u8 *text_end)
{ {
u8 **ptr; const s32 *poff;
mutex_lock(&text_mutex); mutex_lock(&text_mutex);
for (ptr = start; ptr < end; ptr++) { for (poff = start; poff < end; poff++) {
if (*ptr < text) u8 *ptr = (u8 *)poff + *poff;
continue;
if (*ptr > text_end) if (!*poff || ptr < text || ptr >= text_end)
continue; continue;
/* turn DS segment override prefix into lock prefix */ /* turn DS segment override prefix into lock prefix */
text_poke(*ptr, ((unsigned char []){0xf0}), 1); text_poke(ptr, ((unsigned char []){0xf0}), 1);
}; };
mutex_unlock(&text_mutex); mutex_unlock(&text_mutex);
} }
static void alternatives_smp_unlock(u8 **start, u8 **end, u8 *text, u8 *text_end) static void alternatives_smp_unlock(const s32 *start, const s32 *end,
u8 *text, u8 *text_end)
{ {
u8 **ptr; const s32 *poff;
if (noreplace_smp) if (noreplace_smp)
return; return;
mutex_lock(&text_mutex); mutex_lock(&text_mutex);
for (ptr = start; ptr < end; ptr++) { for (poff = start; poff < end; poff++) {
if (*ptr < text) u8 *ptr = (u8 *)poff + *poff;
continue;
if (*ptr > text_end) if (!*poff || ptr < text || ptr >= text_end)
continue; continue;
/* turn lock prefix into DS segment override prefix */ /* turn lock prefix into DS segment override prefix */
text_poke(*ptr, ((unsigned char []){0x3E}), 1); text_poke(ptr, ((unsigned char []){0x3E}), 1);
}; };
mutex_unlock(&text_mutex); mutex_unlock(&text_mutex);
} }
...@@ -276,8 +278,8 @@ struct smp_alt_module { ...@@ -276,8 +278,8 @@ struct smp_alt_module {
char *name; char *name;
/* ptrs to lock prefixes */ /* ptrs to lock prefixes */
u8 **locks; const s32 *locks;
u8 **locks_end; const s32 *locks_end;
/* .text segment, needed to avoid patching init code ;) */ /* .text segment, needed to avoid patching init code ;) */
u8 *text; u8 *text;
...@@ -398,17 +400,20 @@ void alternatives_smp_switch(int smp) ...@@ -398,17 +400,20 @@ void alternatives_smp_switch(int smp)
int alternatives_text_reserved(void *start, void *end) int alternatives_text_reserved(void *start, void *end)
{ {
struct smp_alt_module *mod; struct smp_alt_module *mod;
u8 **ptr; const s32 *poff;
u8 *text_start = start; u8 *text_start = start;
u8 *text_end = end; u8 *text_end = end;
list_for_each_entry(mod, &smp_alt_modules, next) { list_for_each_entry(mod, &smp_alt_modules, next) {
if (mod->text > text_end || mod->text_end < text_start) if (mod->text > text_end || mod->text_end < text_start)
continue; continue;
for (ptr = mod->locks; ptr < mod->locks_end; ptr++) for (poff = mod->locks; poff < mod->locks_end; poff++) {
if (text_start <= *ptr && text_end >= *ptr) const u8 *ptr = (const u8 *)poff + *poff;
if (text_start <= ptr && text_end > ptr)
return 1; return 1;
} }
}
return 0; return 0;
} }
......
...@@ -539,7 +539,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b) ...@@ -539,7 +539,7 @@ void machine_check_poll(enum mcp_flags flags, mce_banks_t *b)
struct mce m; struct mce m;
int i; int i;
__get_cpu_var(mce_poll_count)++; percpu_inc(mce_poll_count);
mce_setup(&m); mce_setup(&m);
...@@ -934,7 +934,7 @@ void do_machine_check(struct pt_regs *regs, long error_code) ...@@ -934,7 +934,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
atomic_inc(&mce_entry); atomic_inc(&mce_entry);
__get_cpu_var(mce_exception_count)++; percpu_inc(mce_exception_count);
if (notify_die(DIE_NMI, "machine check", regs, error_code, if (notify_die(DIE_NMI, "machine check", regs, error_code,
18, SIGKILL) == NOTIFY_STOP) 18, SIGKILL) == NOTIFY_STOP)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment