Commit 247ed088 authored by Dave Jones, committed by Linus Torvalds

[PATCH] i386 mmx copying bug.

Odd, this is the missing half of the recent fix where we prefetched
too far; not sure how this part got dropped. Without it, we still prefetch
past the end of the memory range being copied.
Also adds small compiler-hint micro-optimisations (unlikely() on the
in_interrupt() checks).
parent 42294085
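Why the loop bound changes from i>0 to i>5: each iteration of the copy loop
handles a 64-byte block while prefetching 320 bytes ahead, so the last
320/64 = 5 iterations would prefetch past the end of the source buffer.
Below is a minimal sketch of the pattern in plain C, assuming 64-byte blocks
and using GCC's __builtin_prefetch in place of the kernel's inline-asm
prefetch; copy_blocks and its constants are illustrative, not the kernel code.

```c
#include <string.h>

#define BLOCK          64    /* bytes copied per iteration (as in the MMX loop) */
#define PREFETCH_AHEAD 320   /* prefetch distance in bytes = 5 blocks */

static void copy_blocks(char *to, const char *from, int i)
{
	/* Main loop: stop 5 blocks early so no prefetch reaches past the end. */
	for (; i > PREFETCH_AHEAD / BLOCK; i--) {
		__builtin_prefetch(from + PREFETCH_AHEAD);
		memcpy(to, from, BLOCK);
		to += BLOCK;
		from += BLOCK;
	}
	/* Tail loop: copy the final blocks without prefetching. */
	for (; i > 0; i--) {
		memcpy(to, from, BLOCK);
		to += BLOCK;
		from += BLOCK;
	}
}
```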
arch/i386/lib/mmx.c

@@ -31,7 +31,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 	void *p;
 	int i;
 
-	if (in_interrupt())
+	if (unlikely(in_interrupt()))
 		return __memcpy(to, from, len);
 
 	p = to;
@@ -57,7 +57,7 @@ void *_mmx_memcpy(void *to, const void *from, size_t len)
 		: : "r" (from) );
 
-	for(; i>0; i--)
+	for(; i>5; i--)
 	{
 		__asm__ __volatile__ (
 		"1: prefetch 320(%0)\n"
@@ -372,7 +372,7 @@ static void slow_zero_page(void * page)
 
 void mmx_clear_page(void * page)
 {
-	if(in_interrupt())
+	if(unlikely(in_interrupt()))
 		slow_zero_page(page);
 	else
 		fast_clear_page(page);
@@ -392,7 +392,7 @@ static void slow_copy_page(void *to, void *from)
 
 void mmx_copy_page(void *to, void *from)
 {
-	if(in_interrupt())
+	if(unlikely(in_interrupt()))
 		slow_copy_page(to, from);
 	else
 		fast_copy_page(to, from);
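The unlikely() annotations are the other half of the patch: in_interrupt()
is almost never true on these paths, and the hint lets GCC lay out the
common case as straight-line fall-through code. A minimal standalone sketch
of the idiom, assuming the usual __builtin_expect-based definitions (the
kernel's own live in include/linux/compiler.h):

```c
/* Standalone sketch of the kernel's branch-prediction hints. */
#define likely(x)   __builtin_expect(!!(x), 1)
#define unlikely(x) __builtin_expect(!!(x), 0)

/* Hypothetical caller: the rare branch is marked cold so the
 * compiler keeps the common path as straight-line code. */
int do_copy(int in_irq)
{
	if (unlikely(in_irq))
		return -1;   /* rare: fall back to the slow path */
	return 0;            /* common: fast path */
}
```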