Commit ae2e15eb authored by Glauber de Oliveira Costa, committed by Ingo Molnar

x86: unify prefetch operations

This patch moves the prefetch[w]? functions to processor.h
Signed-off-by: Glauber de Oliveira Costa <gcosta@redhat.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
parent 1a53905a
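For context, a minimal usage sketch (not part of this patch; the struct and function names are invented for illustration): prefetch() hints that memory will be read soon, so a caller can pull the next list node into the cache while still working on the current one, and spin_lock_prefetch() does the same for a lock's cache line.

/* Illustrative sketch only -- hypothetical struct and function.
 * prefetch() is just a hint and never faults, so issuing it for a
 * possibly-NULL next pointer is harmless. */
struct item {
	struct item *next;
	int payload;
};

static int sum_items(struct item *head)
{
	struct item *p;
	int sum = 0;

	for (p = head; p; p = p->next) {
		prefetch(p->next);	/* overlap the next load with this iteration's work */
		sum += p->payload;
	}
	return sum;
}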
...@@ -596,6 +596,36 @@ extern char ignore_fpu_irq;

#define ARCH_HAS_PREFETCHW
#define ARCH_HAS_SPINLOCK_PREFETCH

#ifdef CONFIG_X86_32
#define BASE_PREFETCH ASM_NOP4
#define ARCH_HAS_PREFETCH
#else
#define BASE_PREFETCH "prefetcht0 (%1)"
#endif
/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6,
   because they are microcoded there and very slow.
   However, we currently don't do prefetches for pre-XP Athlons;
   that should be fixed. */
static inline void prefetch(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency
   protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(BASE_PREFETCH,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#define spin_lock_prefetch(x)	prefetchw(x)

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
...
...@@ -228,29 +228,4 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);

#define ASM_NOP_MAX 8

/* Prefetch instructions for Pentium III and AMD Athlon */
/* It's not worth caring about 3dnow! prefetches for the K6,
   because they are microcoded there and very slow.
   However, we currently don't do prefetches for pre-XP Athlons;
   that should be fixed. */
static inline void prefetch(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchnta (%1)",
			  X86_FEATURE_XMM,
			  "r" (x));
}
#define ARCH_HAS_PREFETCH
/* 3dnow! prefetch to get an exclusive cache line. Useful for
   spinlocks to avoid one state transition in the cache coherency
   protocol. */
static inline void prefetchw(const void *x)
{
	alternative_input(ASM_NOP4,
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#endif /* __ASM_I386_PROCESSOR_H */
...@@ -124,12 +124,4 @@ DECLARE_PER_CPU(struct orig_ist, orig_ist);

#define ASM_NOP_MAX 8

static inline void prefetchw(void *x)
{
	alternative_input("prefetcht0 (%1)",
			  "prefetchw (%1)",
			  X86_FEATURE_3DNOW,
			  "r" (x));
}
#endif /* __ASM_X86_64_PROCESSOR_H */
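A note on the mechanism, as a rough sketch rather than the real implementation: alternative_input() emits the base instruction together with a replacement tied to a CPU feature bit, and at boot the replacement is patched over the base in place, so the final code has no branch. Written as an explicit runtime feature check purely for clarity (the helper name below is made up; boot_cpu_has() is assumed to be the usual feature test), the unified prefetch() roughly amounts to:

/* Sketch only: what prefetch() behaves like after boot-time patching,
 * written with a runtime check instead of instruction patching. */
static inline void prefetch_sketch(const void *x)
{
	if (boot_cpu_has(X86_FEATURE_XMM)) {
		/* SSE available: non-temporal prefetch */
		asm volatile("prefetchnta (%0)" : : "r" (x));
	} else {
#ifdef CONFIG_X86_32
		/* old 32-bit CPUs may lack prefetch entirely: just NOPs */
		asm volatile(ASM_NOP4);
#else
		/* every 64-bit CPU supports prefetcht0 */
		asm volatile("prefetcht0 (%0)" : : "r" (x));
#endif
	}
}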