Commit a6b5d744 authored by Linus Torvalds

Import 2.1.133pre5

parent b7cd5844
@@ -44,7 +44,6 @@
 #include <asm/bitops.h>
 #include <asm/pgtable.h>
 #include <asm/io.h>
-#include <linux/io_trace.h>
 #ifdef CONFIG_MTRR
 # include <asm/mtrr.h>
@@ -1225,12 +1224,10 @@ static inline unsigned int __get_ICR (void)
 	int count = 0;
 	unsigned int cfg;
 
-	IO_trace (IO_smp_wait_apic_start, 0, 0, 0, 0);
 	while (count < 1000)
 	{
 		cfg = slow_ICR;
 		if (!(cfg&(1<<12))) {
-			IO_trace (IO_smp_wait_apic_end, 0, 0, 0, 0);
 			if (count)
 				atomic_add(count, (atomic_t*)&ipi_count);
 			return cfg;
@@ -1299,9 +1296,6 @@ static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
 	/*
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
-
-	IO_trace (IO_smp_send_ipi, shortcut, vector, cfg, 0);
-
 	apic_write(APIC_ICR, cfg);
 #if FORCE_APIC_SERIALIZATION
 	__restore_flags(flags);
@@ -1348,9 +1342,6 @@ static inline void send_IPI_single(int dest, int vector)
 	/*
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
-
-	IO_trace (IO_smp_send_ipi, dest, vector, cfg, 0);
-
 	apic_write(APIC_ICR, cfg);
 #if FORCE_APIC_SERIALIZATION
 	__restore_flags(flags);
@@ -1390,8 +1381,6 @@ void smp_flush_tlb(void)
 	__save_flags(flags);
 	__cli();
-
-	IO_trace (IO_smp_message, 0, 0, 0, 0);
 	send_IPI_allbutself(INVALIDATE_TLB_VECTOR);
 
 	/*
@@ -1556,9 +1545,6 @@ void smp_apic_timer_interrupt(struct pt_regs * regs)
  */
 asmlinkage void smp_reschedule_interrupt(void)
 {
-	IO_trace (IO_smp_reschedule, current->need_resched,
-		  current->priority, current->counter, 0);
-
 	ack_APIC_irq();
 }
@@ -1567,9 +1553,6 @@ asmlinkage void smp_reschedule_interrupt(void)
  */
 asmlinkage void smp_invalidate_interrupt(void)
 {
-	IO_trace (IO_smp_tlbflush,
-		  atomic_read((atomic_t *)&smp_invalidate_needed), 0, 0, 0);
-
 	if (test_and_clear_bit(smp_processor_id(), &smp_invalidate_needed))
 		local_flush_tlb();
@@ -165,8 +165,19 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
  * Force strict CPU ordering.
  * And yes, this is required on UP too when we're talking
  * to devices.
+ *
+ * For now, "wmb()" doesn't actually do anything, as all
+ * intel CPU's follow what intel calls a *Processor Order*,
+ * in which all writes are seen in the program order even
+ * outside the CPU.
+ *
+ * I expect future intel CPU's to have a weaker ordering,
+ * but I'd also expect them to finally get their act together
+ * and add some real memory barriers if so.
  */
 #define mb() __asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
+#define rmb() mb()
+#define wmb() __asm__ __volatile__ ("": : :"memory")
 
 /* interrupt control.. */
 #define __sti() __asm__ __volatile__ ("sti": : :"memory")
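For context: the wmb() added above compiles to no instruction at all on x86; the "memory" clobber is purely a compiler barrier, which is enough under Intel's processor ordering. A minimal sketch of the publish-then-flag pattern these barriers exist for (illustrative only, not part of this commit; shared_data, data_ready, producer and consumer are made-up names):

#define mb()	__asm__ __volatile__ ("lock; addl $0,0(%%esp)": : :"memory")
#define rmb()	mb()
#define wmb()	__asm__ __volatile__ ("": : :"memory")

static int shared_data;			/* payload the producer publishes */
static volatile int data_ready;		/* flag the consumer spins on */

void producer(void)
{
	shared_data = 42;	/* store the payload first... */
	wmb();			/* ...keep that store ahead of the flag */
	data_ready = 1;		/* ...then publish the flag */
}

int consumer(void)
{
	while (!data_ready)	/* spin until the flag becomes visible */
		;
	rmb();			/* pairs with the producer's wmb() */
	return shared_data;	/* guaranteed to read 42 */
}

On current x86 parts the wmb() only stops the compiler from reordering the two stores; the hardware already keeps writes in program order, exactly as the comment in the hunk says.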
torvalds@penguin.transmeta.com
\ No newline at end of file
@@ -594,9 +594,9 @@ static inline void __schedule_tail (void)
 	 * We have dropped all locks, and we must make sure that we
 	 * only mark the previous process as no longer having a CPU
 	 * after all other state has been seen by other CPU's. Thus
-	 * the memory barrier!
+	 * the write memory barrier!
 	 */
-	mb();
+	wmb();
 	sched_data->prev->has_cpu = 0;
 #endif /* __SMP__ */
 }
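To make the reasoning in that comment concrete, a sketch of why clearing has_cpu needs only a write barrier (illustrative only, not from this commit; task_sketch, release_cpu and wait_for_cpu are made-up names, and wmb() is the x86 definition from the hunk above): no other CPU will touch the task while has_cpu is set, so all that matters is that the earlier stores become visible before the store that clears the flag.

#define wmb()	__asm__ __volatile__ ("": : :"memory")

struct task_sketch {
	volatile int has_cpu;	/* nonzero while a CPU still owns the task */
	int state;		/* stands in for "all other state" above */
};

void release_cpu(struct task_sketch *p)
{
	p->state = 0;		/* finish every store to the task's state... */
	wmb();			/* ...make sure those stores are seen first... */
	p->has_cpu = 0;		/* ...then let another CPU take the task */
}

void wait_for_cpu(struct task_sketch *p)
{
	while (p->has_cpu)	/* some CPU still owns the task */
		;
	/* every store made before the releasing CPU's wmb() is visible now */
}

Since only store ordering is required here, the free compiler-barrier wmb() can replace the locked-instruction mb() without losing the guarantee on x86.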