Commit 48ea7530 authored by Linus Torvalds's avatar Linus Torvalds
parents 7c3dbbe9 dc86e88c
...@@ -113,7 +113,7 @@ CONFIG_IOSAPIC=y ...@@ -113,7 +113,7 @@ CONFIG_IOSAPIC=y
CONFIG_IA64_SGI_SN_XP=m CONFIG_IA64_SGI_SN_XP=m
CONFIG_FORCE_MAX_ZONEORDER=17 CONFIG_FORCE_MAX_ZONEORDER=17
CONFIG_SMP=y CONFIG_SMP=y
CONFIG_NR_CPUS=512 CONFIG_NR_CPUS=1024
# CONFIG_HOTPLUG_CPU is not set # CONFIG_HOTPLUG_CPU is not set
CONFIG_SCHED_SMT=y CONFIG_SCHED_SMT=y
CONFIG_PREEMPT=y CONFIG_PREEMPT=y
......
...@@ -249,3 +249,32 @@ time_init (void) ...@@ -249,3 +249,32 @@ time_init (void)
*/ */
set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec); set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec, -xtime.tv_nsec);
} }
/* Maximum microseconds spent in a single non-preemptible spin chunk. */
#define SMALLUSECS 100

/*
 * udelay - busy-wait for at least @usecs microseconds.
 *
 * The ITC (interval time counter) is not guaranteed to be synchronized
 * across CPUs, so the ITC-based spin must not migrate mid-measurement.
 * Instead of disabling preemption for the whole delay, break it into
 * chunks of at most SMALLUSECS and allow preemption between them.
 */
void
udelay (unsigned long usecs)
{
	unsigned long remaining = usecs;

	while (remaining > 0) {
		unsigned long chunk, delta, t0;

		chunk = (remaining > SMALLUSECS) ? SMALLUSECS : remaining;

		/* Spin on the local CPU's ITC without being preempted. */
		preempt_disable();
		delta = chunk * local_cpu_data->cyc_per_usec;
		t0 = ia64_get_itc();
		while (ia64_get_itc() - t0 < delta)
			cpu_relax();
		preempt_enable();

		remaining -= chunk;
	}
}

EXPORT_SYMBOL(udelay);
...@@ -53,7 +53,7 @@ static void uncached_ipi_visibility(void *data) ...@@ -53,7 +53,7 @@ static void uncached_ipi_visibility(void *data)
if ((status != PAL_VISIBILITY_OK) && if ((status != PAL_VISIBILITY_OK) &&
(status != PAL_VISIBILITY_OK_REMOTE_NEEDED)) (status != PAL_VISIBILITY_OK_REMOTE_NEEDED))
printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on " printk(KERN_DEBUG "pal_prefetch_visibility() returns %i on "
"CPU %i\n", status, get_cpu()); "CPU %i\n", status, raw_smp_processor_id());
} }
...@@ -63,7 +63,7 @@ static void uncached_ipi_mc_drain(void *data) ...@@ -63,7 +63,7 @@ static void uncached_ipi_mc_drain(void *data)
status = ia64_pal_mc_drain(); status = ia64_pal_mc_drain();
if (status) if (status)
printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on " printk(KERN_WARNING "ia64_pal_mc_drain() failed with %i on "
"CPU %i\n", status, get_cpu()); "CPU %i\n", status, raw_smp_processor_id());
} }
...@@ -105,7 +105,7 @@ uncached_get_new_chunk(struct gen_pool *poolp) ...@@ -105,7 +105,7 @@ uncached_get_new_chunk(struct gen_pool *poolp)
status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL); status = ia64_pal_prefetch_visibility(PAL_VISIBILITY_PHYSICAL);
dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n", dprintk(KERN_INFO "pal_prefetch_visibility() returns %i on cpu %i\n",
status, get_cpu()); status, raw_smp_processor_id());
if (!status) { if (!status) {
status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1); status = smp_call_function(uncached_ipi_visibility, NULL, 0, 1);
......
...@@ -177,6 +177,9 @@ SECTIONS ...@@ -177,6 +177,9 @@ SECTIONS
} }
. = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose kernel data */ . = ALIGN(PAGE_SIZE); /* make sure the gate page doesn't expose kernel data */
.data.read_mostly : AT(ADDR(.data.read_mostly) - LOAD_OFFSET)
{ *(.data.read_mostly) }
.data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET) .data.cacheline_aligned : AT(ADDR(.data.cacheline_aligned) - LOAD_OFFSET)
{ *(.data.cacheline_aligned) } { *(.data.cacheline_aligned) }
......
...@@ -202,7 +202,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start, ...@@ -202,7 +202,7 @@ sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
unsigned long end, unsigned long nbits) unsigned long end, unsigned long nbits)
{ {
int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0; int i, opt, shub1, cnode, mynasid, cpu, lcpu = 0, nasid, flushed = 0;
int mymm = (mm == current->active_mm); int mymm = (mm == current->active_mm && current->mm);
volatile unsigned long *ptc0, *ptc1; volatile unsigned long *ptc0, *ptc1;
unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value; unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value;
short nasids[MAX_NUMNODES], nix; short nasids[MAX_NUMNODES], nix;
......
...@@ -84,14 +84,6 @@ __delay (unsigned long loops) ...@@ -84,14 +84,6 @@ __delay (unsigned long loops)
ia64_delay_loop (loops - 1); ia64_delay_loop (loops - 1);
} }
static __inline__ void extern void udelay (unsigned long usecs);
udelay (unsigned long usecs)
{
unsigned long start = ia64_get_itc();
unsigned long cycles = usecs*local_cpu_data->cyc_per_usec;
while (ia64_get_itc() - start < cycles)
cpu_relax();
}
#endif /* _ASM_IA64_DELAY_H */ #endif /* _ASM_IA64_DELAY_H */
...@@ -13,7 +13,7 @@ ...@@ -13,7 +13,7 @@
#define SMP_CACHE_BYTES L1_CACHE_BYTES #define SMP_CACHE_BYTES L1_CACHE_BYTES
#endif #endif
#if defined(CONFIG_X86) || defined(CONFIG_SPARC64) #if defined(CONFIG_X86) || defined(CONFIG_SPARC64) || defined(CONFIG_IA64)
#define __read_mostly __attribute__((__section__(".data.read_mostly"))) #define __read_mostly __attribute__((__section__(".data.read_mostly")))
#else #else
#define __read_mostly #define __read_mostly
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment