Commit e9b9eb59 authored by David S. Miller

sparc64: Use pause instruction when available.

In atomic backoff and cpu_relax(), use the pause instruction
found on SPARC-T4 and later.

It makes the cpu strand unselectable for the given number of
cycles, unless an intervening disrupting trap occurs.
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 270c10e0
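
For orientation, a minimal sketch (not part of the commit) of what cpu_relax() effectively becomes on a pause-capable cpu once the boot-time patching below has run; the function name is illustrative, and the instruction sequence is the three-word replacement recorded in the .pause_patch entry for cpu_relax():

	/* Illustrative only: post-patch behaviour of cpu_relax() on SPARC-T4+.
	 * Writing 128 to %asr27 (the pause register) makes the strand
	 * unselectable for up to 128 cycles (a disrupting trap ends the
	 * pause early); the two nops pad the patch site to the same
	 * three-instruction length as the "rd %ccr, %g0" reads it replaces.
	 */
	static inline void cpu_relax_pause_sketch(void)
	{
		__asm__ __volatile__("wr	%%g0, 128, %%asr27\n\t"
				     "nop\n\t"
				     "nop"
				     : : : "memory");
	}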
@@ -16,6 +16,12 @@
 88:	rd	%ccr, %g0; \
 	rd	%ccr, %g0; \
 	rd	%ccr, %g0; \
+	.section	.pause_patch,"ax"; \
+	.word	88b; \
+	sllx	tmp, 7, tmp; \
+	wr	tmp, 0, %asr27; \
+	clr	tmp; \
+	.previous; \
 	brnz,pt	tmp, 88b; \
 	 sub	tmp, 1, tmp; \
 	set	BACKOFF_LIMIT, tmp; \
...
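
A hedged C-level reading of the backoff change above (names illustrative, not from the patch): on a pause-capable cpu the three %ccr reads at label 88 are overwritten with the sllx/wr/clr triple, so the spin loop collapses into a single pause of tmp << 7 cycles, after which the cleared tmp lets the brnz fall through.

	/* Illustrative only: rough equivalent of the patched backoff spin.
	 * One write of (count << 7) to %asr27 pauses the strand for that
	 * many cycles (unless a disrupting trap intervenes), replacing
	 * "count" iterations of three %ccr reads each.
	 */
	static inline void backoff_spin_sketch(unsigned long count)
	{
		__asm__ __volatile__("sllx	%0, 7, %0\n\t"
				     "wr	%0, 0, %%asr27"
				     : "+r" (count));
	}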
@@ -196,9 +196,16 @@ extern unsigned long get_wchan(struct task_struct *task);
 #define KSTK_EIP(tsk)  (task_pt_regs(tsk)->tpc)
 #define KSTK_ESP(tsk)  (task_pt_regs(tsk)->u_regs[UREG_FP])
 
-#define cpu_relax()	asm volatile("rd	%%ccr, %%g0\n\t" \
-				     "rd	%%ccr, %%g0\n\t" \
-				     "rd	%%ccr, %%g0" \
+#define cpu_relax()	asm volatile("\n99:\n\t" \
+				     "rd	%%ccr, %%g0\n\t" \
+				     "rd	%%ccr, %%g0\n\t" \
+				     "rd	%%ccr, %%g0\n\t" \
+				     ".section	.pause_patch,\"ax\"\n\t"\
+				     ".word	99b\n\t" \
+				     "wr	%%g0, 128, %%asr27\n\t" \
+				     "nop\n\t" \
+				     "nop\n\t" \
+				     ".previous" \
 				     ::: "memory")
...
@@ -59,6 +59,13 @@ struct popc_6insn_patch_entry {
 extern struct popc_6insn_patch_entry __popc_6insn_patch,
 	__popc_6insn_patch_end;
 
+struct pause_patch_entry {
+	unsigned int	addr;
+	unsigned int	insns[3];
+};
+extern struct pause_patch_entry __pause_patch,
+	__pause_patch_end;
+
 extern void __init per_cpu_patch(void);
 extern void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
 				    struct sun4v_1insn_patch_entry *);
...
@@ -316,6 +316,25 @@ static void __init popc_patch(void)
 	}
 }
 
+static void __init pause_patch(void)
+{
+	struct pause_patch_entry *p;
+
+	p = &__pause_patch;
+	while (p < &__pause_patch_end) {
+		unsigned long i, addr = p->addr;
+
+		for (i = 0; i < 3; i++) {
+			*(unsigned int *) (addr + (i * 4)) = p->insns[i];
+			wmb();
+			__asm__ __volatile__("flush	%0"
+					     : : "r" (addr + (i * 4)));
+		}
+
+		p++;
+	}
+}
+
 #ifdef CONFIG_SMP
 void __init boot_cpu_id_too_large(int cpu)
 {
...
@@ -528,6 +547,8 @@ static void __init init_sparc64_elf_hwcap(void)
 
 	if (sparc64_elf_hwcap & AV_SPARC_POPC)
 		popc_patch();
+	if (sparc64_elf_hwcap & AV_SPARC_PAUSE)
+		pause_patch();
 }
 
 void __init setup_arch(char **cmdline_p)
...
@@ -132,6 +132,11 @@ SECTIONS
 		*(.popc_6insn_patch)
 		__popc_6insn_patch_end = .;
 	}
+	.pause_patch : {
+		__pause_patch = .;
+		*(.pause_patch)
+		__pause_patch_end = .;
+	}
 	PERCPU_SECTION(SMP_CACHE_BYTES)
 
 	. = ALIGN(PAGE_SIZE);
...