Commit 167c0fd7, authored Feb 11, 2002 by Linus Torvalds

Merge bk://are.twiddle.net:8080/linux-alpha-2.5
into home.transmeta.com:/home/torvalds/v2.5/linux

Parents: 6c645061, c8075847

Showing 3 changed files with 187 additions and 175 deletions (+187, -175):

arch/alpha/kernel/smp.c         +151 -155
include/asm-alpha/bitops.h       +20 -8
include/asm-alpha/spinlock.h     +16 -12

arch/alpha/kernel/smp.c
@@ -62,6 +62,7 @@ static struct {
 enum ipi_message_type {
 	IPI_RESCHEDULE,
+	IPI_MIGRATION,
 	IPI_CALL_FUNC,
 	IPI_CPU_STOP,
 };
@@ -69,7 +70,7 @@ enum ipi_message_type {
 spinlock_t kernel_flag __cacheline_aligned_in_smp = SPIN_LOCK_UNLOCKED;

 /* Set to a secondary's cpuid when it comes online.  */
-static unsigned long smp_secondary_alive;
+static int smp_secondary_alive __initdata = 0;

 /* Which cpus ids came online.  */
 unsigned long cpu_present_mask;
@@ -82,6 +83,7 @@ int smp_num_probed; /* Internal processor count */
 int smp_num_cpus = 1;		/* Number that came online.  */
 int smp_threads_ready;		/* True once the per process idle is forked. */
 cycles_t cacheflush_time;
+unsigned long cache_decay_ticks;

 int __cpu_number_map[NR_CPUS];
 int __cpu_logical_map[NR_CPUS];
@@ -156,13 +158,6 @@ smp_callin(void)
 {
 	int cpuid = hard_smp_processor_id();

-	if (current != init_tasks[cpu_number_map(cpuid)]) {
-		printk("BUG: smp_calling: cpu %d current %p init_tasks[cpu_number_map(cpuid)] %p\n",
-		       cpuid, current, init_tasks[cpu_number_map(cpuid)]);
-	}
-
-	DBGS(("CALLIN %d state 0x%lx\n", cpuid, current->state));
-
 	/* Turn on machine checks.  */
 	wrmces(7);
@@ -175,22 +170,21 @@ smp_callin(void)
 	/* Get our local ticker going. */
 	smp_setup_percpu_timer(cpuid);

+	/* All kernel threads share the same mm context.  */
+	atomic_inc(&init_mm.mm_count);
+	current->active_mm = &init_mm;
+
 	/* Must have completely accurate bogos.  */
 	__sti();

-	/*
-	 * Wait boot CPU to stop with irq enabled before
-	 * running calibrate_delay().
-	 */
+	/* Wait boot CPU to stop with irq enabled before running
+	   calibrate_delay. */
 	wait_boot_cpu_to_stop(cpuid);
 	mb();
 	calibrate_delay();

 	smp_store_cpu_info(cpuid);

-	/*
-	 * Allow master to continue only after we written
-	 * the loops_per_jiffy.
-	 */
+	/* Allow master to continue only after we written loops_per_jiffy.  */
 	wmb();
 	smp_secondary_alive = 1;
@@ -198,15 +192,9 @@ smp_callin(void)
 	while (!smp_threads_ready)
 		barrier();

-	DBGS(("smp_callin: commencing CPU %d current %p\n",
-	      cpuid, current));
+	DBGS(("smp_callin: commencing CPU %d current %p active_mm %p\n",
+	      cpuid, current, current->active_mm));

-	/* Setup the scheduler for this processor.  */
-	init_idle();
-
-	/* ??? This should be in init_idle.  */
-	atomic_inc(&init_mm.mm_count);
-	current->active_mm = &init_mm;
 	/* Do nothing.  */
 	cpu_idle();
 }
@@ -222,8 +210,9 @@ static void __init
 smp_tune_scheduling (int cpuid)
 {
 	struct percpu_struct *cpu;
-	unsigned long on_chip_cache;
-	unsigned long freq;
+	unsigned long on_chip_cache;	/* kB */
+	unsigned long freq;		/* Hz */
+	unsigned long bandwidth = 350;	/* MB/s */

 	cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset
 				      + cpuid * hwrpb->processor_size);
@@ -244,43 +233,54 @@ smp_tune_scheduling (int cpuid)
 	case EV6_CPU:
 	case EV67_CPU:
-		on_chip_cache = 64 + 64;
-		break;
-
 	default:
-		on_chip_cache = 8 + 8;
+		on_chip_cache = 64 + 64;
 		break;
 	}

 	freq = hwrpb->cycle_freq ? : est_cycle_freq;

-	/* Magic estimation stolen from x86 port.  */
-	cacheflush_time = freq / 1024L * on_chip_cache / 5000L;
-
-	printk("Using heuristic of %d cycles.\n",
-	       cacheflush_time);
+#if 0
+	/* Magic estimation stolen from x86 port.  */
+	cacheflush_time = (freq / 1000000) * (on_chip_cache << 10) / bandwidth;
+
+	cache_decay_ticks = cacheflush_time / (freq / 1000) * HZ / 1000;
+
+	printk("per-CPU timeslice cutoff: %ld.%02ld usecs.\n",
+	       cacheflush_time/(freq/1000000),
+	       (cacheflush_time*100/(freq/1000000)) % 100);
+
+	printk("task migration cache decay timeout: %ld msecs.\n",
+	       (cache_decay_ticks + 1) * 1000 / HZ);
+#else
+	/* Magic value to force potential preemption of other CPUs.  */
+	cacheflush_time = INT_MAX;
+
+	printk("Using heuristic of %d cycles.\n",
+	       cacheflush_time);
+#endif
 }

+/* Wait until hwrpb->txrdy is clear for cpu.  Return -1 on timeout.  */
+static int __init
+wait_for_txrdy (unsigned long cpumask)
+{
+	unsigned long timeout;
+
+	if (!(hwrpb->txrdy & cpumask))
+		return 0;
+
+	timeout = jiffies + 10*HZ;
+	while (time_before(jiffies, timeout)) {
+		if (!(hwrpb->txrdy & cpumask))
+			return 0;
+		udelay(10);
+		barrier();
+	}
+
+	return -1;
+}
+
 /*
  * Send a message to a secondary's console.  "START" is one such
  * interesting message.  ;-)
  */
-static void
+static void __init
 send_secondary_console_msg(char *str, int cpuid)
 {
 	struct percpu_struct *cpu;
 	register char *cp1, *cp2;
 	unsigned long cpumask;
 	size_t len;
-	long timeout;

 	cpu = (struct percpu_struct *)
 		((char*)hwrpb
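
The new wait_for_txrdy() helper above replaces this file's three open-coded delay loops: it polls hwrpb->txrdy under a jiffies + 10*HZ deadline with time_before(), returning 0 when the console is ready and -1 on timeout. A minimal user-space sketch of the same poll-until-deadline idiom (poll_until, counter_done, and the 100 ms budget are illustrative, not kernel API):

	#include <stdbool.h>
	#include <time.h>

	/* Poll a condition until it holds or a deadline passes, mirroring
	   the "timeout = jiffies + 10*HZ; while (time_before(jiffies,
	   timeout))" pattern in wait_for_txrdy(). */
	static bool poll_until(bool (*ready)(void), long timeout_ms)
	{
		struct timespec now, deadline;

		clock_gettime(CLOCK_MONOTONIC, &deadline);
		deadline.tv_sec  += timeout_ms / 1000;
		deadline.tv_nsec += (timeout_ms % 1000) * 1000000L;
		if (deadline.tv_nsec >= 1000000000L) {
			deadline.tv_sec  += 1;
			deadline.tv_nsec -= 1000000000L;
		}

		for (;;) {
			if (ready())
				return true;	/* like "return 0" above */
			clock_gettime(CLOCK_MONOTONIC, &now);
			if (now.tv_sec > deadline.tv_sec ||
			    (now.tv_sec == deadline.tv_sec &&
			     now.tv_nsec >= deadline.tv_nsec))
				return false;	/* like "return -1" above */
			/* the kernel busy-waits with udelay(10); a short
			   sleep or yield would stand in here */
		}
	}

	static int counter;
	static bool counter_done(void) { return ++counter > 1000; }

	int main(void)
	{
		return poll_until(counter_done, 100) ? 0 : 1;
	}

Anchoring the loop to a deadline rather than a fixed iteration count keeps the timeout honest no matter how long each poll takes.
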
@@ -288,9 +288,8 @@ send_secondary_console_msg(char *str, int cpuid)
 		     + cpuid * hwrpb->processor_size);

 	cpumask = (1UL << cpuid);
-	if (hwrpb->txrdy & cpumask)
-		goto delay1;
- ready1:
+	if (wait_for_txrdy(cpumask))
+		goto timeout;

 	cp2 = str;
 	len = strlen(cp2);
@@ -302,34 +301,12 @@ send_secondary_console_msg(char *str, int cpuid)
 	wmb();
 	set_bit(cpuid, &hwrpb->rxrdy);

-	if (hwrpb->txrdy & cpumask)
-		goto delay2;
- ready2:
-	return;
-
-delay1:
-	/* Wait 10 seconds.  Note that jiffies aren't ticking yet.  */
-	for (timeout = 1000000; timeout > 0; --timeout) {
-		if (!(hwrpb->txrdy & cpumask))
-			goto ready1;
-		udelay(10);
-		barrier();
-	}
-	goto timeout;
-
-delay2:
-	/* Wait 10 seconds.  */
-	for (timeout = 1000000; timeout > 0; --timeout) {
-		if (!(hwrpb->txrdy & cpumask))
-			goto ready2;
-		udelay(10);
-		barrier();
-	}
-	goto timeout;
+	if (wait_for_txrdy(cpumask))
+		goto timeout;

 	return;

 timeout:
 	printk("Processor %x not ready\n", cpuid);
-	return;
 }

 /*
@@ -392,7 +369,7 @@ static int __init
 secondary_cpu_start(int cpuid, struct task_struct *idle)
 {
 	struct percpu_struct *cpu;
-	struct pcb_struct *hwpcb;
+	struct pcb_struct *hwpcb, *ipcb;
 	long timeout;

 	cpu = (struct percpu_struct *)
@@ -400,18 +377,19 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
 		+ hwrpb->processor_offset
 		+ cpuid * hwrpb->processor_size);
 	hwpcb = (struct pcb_struct *) cpu->hwpcb;
+	ipcb = &idle->thread_info->pcb;

 	/* Initialize the CPU's HWPCB to something just good enough for
 	   us to get started.  Immediately after starting, we'll swpctx
-	   to the target idle task's ptb.  Reuse the stack in the mean
+	   to the target idle task's pcb.  Reuse the stack in the mean
 	   time.  Precalculate the target PCBB.  */
-	hwpcb->ksp = (unsigned long) idle + sizeof(union task_union) - 16;
+	hwpcb->ksp = (unsigned long)ipcb + sizeof(union thread_union) - 16;
 	hwpcb->usp = 0;
-	hwpcb->ptbr = idle->thread.ptbr;
+	hwpcb->ptbr = ipcb->ptbr;
 	hwpcb->pcc = 0;
 	hwpcb->asn = 0;
-	hwpcb->unique = virt_to_phys(&idle->thread);
-	hwpcb->flags = idle->thread.pal_flags;
+	hwpcb->unique = virt_to_phys(ipcb);
+	hwpcb->flags = ipcb->flags;
 	hwpcb->res1 = hwpcb->res2 = 0;

 #if 0
@@ -419,7 +397,7 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
 	      hwpcb->ksp, hwpcb->ptbr, hwrpb->vptb, hwpcb->unique));
 #endif

 	DBGS(("Starting secondary cpu %d: state 0x%lx pal_flags 0x%lx\n",
-	      cpuid, idle->state, idle->thread.pal_flags));
+	      cpuid, idle->state, ipcb->flags));

 	/* Setup HWRPB fields that SRM uses to activate secondary CPU */
 	hwrpb->CPU_restart = __smp_callin;
@@ -439,9 +417,9 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
 	send_secondary_console_msg("START\r\n", cpuid);

-	/* Wait 10 seconds for an ACK from the console.  Note that jiffies
-	   aren't ticking yet.  */
-	for (timeout = 1000000; timeout > 0; timeout--) {
+	/* Wait 10 seconds for an ACK from the console.  */
+	timeout = jiffies + 10*HZ;
+	while (time_before(jiffies, timeout)) {
 		if (cpu->flags & 1)
 			goto started;
 		udelay(10);
@@ -450,18 +428,17 @@ secondary_cpu_start(int cpuid, struct task_struct *idle)
 	printk(KERN_ERR "SMP: Processor %d failed to start.\n", cpuid);
 	return -1;

 started:
 	DBGS(("secondary_cpu_start: SUCCESS for CPU %d!!!\n", cpuid));
 	return 0;
 }

 static int __init
 fork_by_hand(void)
 {
+	/* Don't care about the contents of regs since we'll never
+	   reschedule the forked task.  */
 	struct pt_regs regs;
-	/*
-	 * don't care about the regs settings since
-	 * we'll never reschedule the forked task.
-	 */
 	return do_fork(CLONE_VM|CLONE_PID, 0, &regs, 0);
 }
@@ -474,67 +451,57 @@ smp_boot_one_cpu(int cpuid, int cpunum)
 	struct task_struct *idle;
 	long timeout;

-	/* Cook up an idler for this guy.  Note that the address we give
-	   to kernel_thread is irrelevant -- it's going to start where
-	   HWRPB.CPU_restart says to start.  But this gets all the other
-	   task-y sort of data structures set up like we wish. */
-	/*
-	 * We can't use kernel_thread since we must avoid to
-	 * reschedule the child.
-	 */
+	/* Cook up an idler for this guy.  Note that the address we
+	   give to kernel_thread is irrelevant -- it's going to start
+	   where HWRPB.CPU_restart says to start.  But this gets all
+	   the other task-y sort of data structures set up like we
+	   wish.  We can't use kernel_thread since we must avoid
+	   rescheduling the child.  */
 	if (fork_by_hand() < 0)
 		panic("failed fork for CPU %d", cpuid);

 	idle = init_task.prev_task;
 	if (!idle)
 		panic("No idle process for CPU %d", cpuid);
+	if (idle == &init_task)
+		panic("idle process is init_task for CPU %d", cpuid);

-	idle->processor = cpuid;
-	idle->cpus_runnable = 1 << cpuid; /* we schedule the first task manually */
+	init_idle(idle, cpuid);
+	unhash_process(idle);
+
 	__cpu_logical_map[cpunum] = cpuid;
 	__cpu_number_map[cpuid] = cpunum;

-	del_from_runqueue(idle);
-	unhash_process(idle);
-	init_tasks[cpunum] = idle;
-
 	DBGS(("smp_boot_one_cpu: CPU %d state 0x%lx flags 0x%lx\n",
 	      cpuid, idle->state, idle->flags));

-	/* The secondary will change this once it is happy.  Note that
-	   secondary_cpu_start contains the necessary memory barrier.  */
+	/* Signal the secondary to wait a moment.  */
 	smp_secondary_alive = -1;

 	/* Whirrr, whirrr, whirrrrrrrrr... */
 	if (secondary_cpu_start(cpuid, idle))
 		return -1;

+	/* Notify the secondary CPU it can run calibrate_delay.  */
 	mb();
-
-	/* Notify the secondary CPU it can run calibrate_delay() */
 	smp_secondary_alive = 0;

 	/* We've been acked by the console; wait one second for the task
-	   to start up for real.  Note that jiffies aren't ticking yet. */
-	for (timeout = 0; timeout < 1000000; timeout++) {
+	   to start up for real.  */
+	timeout = jiffies + 1*HZ;
+	while (time_before(jiffies, timeout)) {
 		if (smp_secondary_alive == 1)
 			goto alive;
 		udelay(10);
 		barrier();
 	}

-	/* we must invalidate our stuff as we failed to boot the CPU */
+	/* We must invalidate our stuff as we failed to boot the CPU.  */
 	__cpu_logical_map[cpunum] = -1;
 	__cpu_number_map[cpuid] = -1;

-	/* the idle task is local to us so free it as we don't use it */
-	free_task_struct(idle);
-
 	printk(KERN_ERR "SMP: Processor %d is stuck.\n", cpuid);
 	return -1;

 alive:
 	/* Another "Red Snapper". */
 	return 0;
 }
@@ -605,20 +572,15 @@ smp_boot_cpus(void)
 	__cpu_number_map[boot_cpuid] = 0;
 	__cpu_logical_map[0] = boot_cpuid;
-	current->processor = boot_cpuid;
+	current_thread_info()->cpu = boot_cpuid;

 	smp_store_cpu_info(boot_cpuid);
 	smp_tune_scheduling(boot_cpuid);
 	smp_setup_percpu_timer(boot_cpuid);

-	init_idle();
-
-	/* ??? This should be in init_idle.  */
-	atomic_inc(&init_mm.mm_count);
-	current->active_mm = &init_mm;
-
 	/* Nothing to do on a UP box, or when told not to.  */
 	if (smp_num_probed == 1 || max_cpus == 0) {
+		cpu_present_mask = 1UL << boot_cpuid;
 		printk(KERN_INFO "SMP mode deactivated.\n");
 		return;
 	}
@@ -707,26 +669,35 @@ setup_profiling_timer(unsigned int multiplier)
 static void
 send_ipi_message(unsigned long to_whom, enum ipi_message_type operation)
 {
-	long i, j;
-
-	/* Reduce the number of memory barriers by doing two loops,
-	   one to set the bits, one to invoke the interrupts.  */
-
-	mb();	/* Order out-of-band data and bit setting. */
-
-	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
-		if (to_whom & j)
-			set_bit(operation, &ipi_data[i].bits);
-	}
-
-	mb();	/* Order bit setting and interrupt. */
+	unsigned long i, set, n;

-	for (i = 0, j = 1; i < NR_CPUS; ++i, j <<= 1) {
-		if (to_whom & j)
-			wripir(i);
+	set = to_whom & -to_whom;
+	if (to_whom == set) {
+		n = __ffs(set);
+		mb();
+		set_bit(operation, &ipi_data[n].bits);
+		mb();
+		wripir(n);
+	}
+	else {
+		mb();
+		for (i = to_whom; i ; i &= ~set) {
+			set = i & -i;
+			n = __ffs(set);
+			set_bit(operation, &ipi_data[n].bits);
+		}
+		mb();
+		for (i = to_whom; i ; i &= ~set) {
+			set = i & -i;
+			n = __ffs(set);
+			wripir(n);
+		}
 	}
 }

+/* Data for IPI_MIGRATION.  */
+static task_t *migration_task;
+
 /* Structure and data for smp_call_function.  This is designed to
    minimize static memory requirements.  Plus it looks cleaner.  */
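
The rewritten send_ipi_message() visits only the CPUs actually named in the mask: set = i & -i isolates the lowest set bit, __ffs(set) converts it to a CPU number, and i &= ~set retires it, so each loop runs once per target rather than NR_CPUS times. A stand-alone check of that idiom, with __builtin_ctzl standing in for the kernel's __ffs():

	#include <stdio.h>

	int main(void)
	{
		unsigned long to_whom = 0x29;	/* CPUs 0, 3 and 5 */
		unsigned long i, set, n;

		for (i = to_whom; i; i &= ~set) {
			set = i & -i;			/* lowest set bit */
			n = __builtin_ctzl(set);	/* its index, as __ffs(set) */
			printf("would IPI cpu %lu\n", n);
		}
		return 0;
	}
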
@@ -743,13 +714,13 @@ static struct smp_call_struct *smp_call_function_data;
 /* Atomicly drop data into a shared pointer.  The pointer is free if
    it is initially locked.  If retry, spin until free.  */

-static inline int
+static int
 pointer_lock (void *lock, void *data, int retry)
 {
 	void *old, *tmp;

 	mb();
  again:
 	/* Compare and swap with zero.  */
 	asm volatile (
 	"1:	ldq_l	%0,%1\n"
@@ -792,13 +763,25 @@ handle_ipi(struct pt_regs *regs)
 		which = ops & -ops;
 		ops &= ~which;
-		which = ffz(~which);
+		which = __ffs(which);

-		if (which == IPI_RESCHEDULE) {
+		switch (which) {
+		case IPI_RESCHEDULE:
 			/* Reschedule callback.  Everything to be done
 			   is done by the interrupt return path.  */
-		}
-		else if (which == IPI_CALL_FUNC) {
+			break;
+
+		case IPI_MIGRATION:
+		{
+			task_t *t = migration_task;
+			mb();
+			migration_task = 0;
+			sched_task_migrated(t);
+			break;
+		}
+
+		case IPI_CALL_FUNC:
+		{
 			struct smp_call_struct *data;
 			void (*func)(void *info);
 			void *info;
@@ -821,13 +804,16 @@ handle_ipi(struct pt_regs *regs)
 			/* Notify the sending CPU that the task is done.  */
 			mb();
 			if (wait) atomic_dec(&data->unfinished_count);
+			break;
 		}
-		else if (which == IPI_CPU_STOP) {
+
+		case IPI_CPU_STOP:
 			halt();
-		}
-		else {
+			break;
+
+		default:
 			printk(KERN_CRIT "Unknown IPI on CPU %d: %lu\n",
 			       this_cpu, which);
+			break;
 		}
 	} while (ops);
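
handle_ipi() drains its pending-operation word with the same lowest-bit trick, now dispatching through a switch that gains the IPI_MIGRATION case. A user-space sketch of that overall shape, assuming hypothetical op names and using a C11 atomic exchange where the kernel grabs the bits:

	#include <stdatomic.h>
	#include <stdio.h>

	enum { OP_RESCHEDULE, OP_MIGRATION, OP_CALL_FUNC, OP_STOP };

	static _Atomic unsigned long pending;

	static void handle_ops(void)
	{
		/* take all currently pending ops in one atomic step */
		unsigned long ops = atomic_exchange(&pending, 0);

		while (ops) {
			unsigned long which = ops & -ops;  /* lowest pending op */
			ops &= ~which;

			switch (__builtin_ctzl(which)) {
			case OP_RESCHEDULE:
				puts("reschedule");
				break;
			case OP_MIGRATION:
				puts("migrate task");
				break;
			case OP_CALL_FUNC:
				puts("call function");
				break;
			case OP_STOP:
				puts("stop");
				break;
			default:
				puts("unknown op");
				break;
			}
		}
	}

	int main(void)
	{
		atomic_fetch_or(&pending,
				(1UL << OP_RESCHEDULE) | (1UL << OP_MIGRATION));
		handle_ops();
		return 0;
	}
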
@@ -851,10 +837,23 @@ smp_send_reschedule(int cpu)
 	send_ipi_message(1UL << cpu, IPI_RESCHEDULE);
 }

+void
+smp_migrate_task(int cpu, task_t *t)
+{
+#if DEBUG_IPI_MSG
+	if (cpu == hard_smp_processor_id())
+		printk(KERN_WARNING
+		       "smp_migrate_task: Sending IPI to self.\n");
+#endif
+	/* Acquire the migration_task mutex.  */
+	pointer_lock(&migration_task, t, 1);
+	send_ipi_message(1UL << cpu, IPI_MIGRATION);
+}
+
 void
 smp_send_stop(void)
 {
-	unsigned long to_whom = cpu_present_mask ^ (1UL << smp_processor_id());
+	unsigned long to_whom = cpu_present_mask & ~(1UL << smp_processor_id());
 #if DEBUG_IPI_MSG
 	if (hard_smp_processor_id() != boot_cpu_id)
 		printk(KERN_WARNING "smp_send_stop: Not on boot cpu.\n");
@@ -881,16 +880,13 @@ smp_call_function_on_cpu (void (*func) (void *info), void *info, int retry,
 	struct smp_call_struct data;
 	long timeout;
 	int num_cpus_to_call;
-	long i, j;

 	data.func = func;
 	data.info = info;
 	data.wait = wait;

 	to_whom &= ~(1L << smp_processor_id());
-	for (i = 0, j = 1, num_cpus_to_call = 0; i < NR_CPUS; ++i, j <<= 1)
-		if (to_whom & j)
-			num_cpus_to_call++;
+	num_cpus_to_call = hweight64(to_whom);

 	atomic_set(&data.unstarted_count, num_cpus_to_call);
 	atomic_set(&data.unfinished_count, num_cpus_to_call);
@@ -1094,7 +1090,7 @@ flush_icache_page(struct vm_area_struct *vma, struct page *page)
 #ifdef CONFIG_DEBUG_SPINLOCK
 void
-spin_unlock(spinlock_t * lock)
+_raw_spin_unlock(spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
@@ -1185,7 +1181,7 @@ debug_spin_trylock(spinlock_t * lock, const char *base_file, int line_no)
 #endif /* CONFIG_DEBUG_SPINLOCK */

 #ifdef CONFIG_DEBUG_RWLOCK
-void write_lock(rwlock_t * lock)
+void _raw_write_lock(rwlock_t * lock)
 {
 	long regx, regy;
 	int stuck_lock, stuck_reader;
@@ -1230,7 +1226,7 @@ void write_lock(rwlock_t * lock)
 	}
 }

-void read_lock(rwlock_t * lock)
+void _raw_read_lock(rwlock_t * lock)
 {
 	long regx;
 	int stuck_lock;

include/asm-alpha/bitops.h
@@ -3,6 +3,7 @@
 #include <linux/config.h>
 #include <linux/kernel.h>
+#include <asm/compiler.h>

 /*
  * Copyright 1994, Linus Torvalds.
@@ -60,14 +61,14 @@ clear_bit(unsigned long nr, volatile void * addr)
 	__asm__ __volatile__(
 	"1:	ldl_l %0,%3\n"
-	"	and %0,%2,%0\n"
+	"	bic %0,%2,%0\n"
 	"	stl_c %0,%1\n"
 	"	beq %0,2f\n"
 	".subsection 2\n"
 	"2:	br 1b\n"
 	".previous"
 	:"=&r" (temp), "=m" (*m)
-	:"Ir" (~(1UL << (nr & 31))), "m" (*m));
+	:"Ir" (1UL << (nr & 31)), "m" (*m));
 }

 /*
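
The clear_bit() change trades "and" with a pre-inverted mask for "bic" (AND-NOT) with the plain mask. The result is identical, since bic computes x & ~m; the plain 1UL << (nr & 31) form also has a better chance of fitting the "I" constraint's 8-bit literal field than its complement does. A quick C check of the equivalence (bic() here models the Alpha instruction; it is not kernel code):

	#include <assert.h>

	/* Model of Alpha "bic a,b,c": c = a & ~b. */
	static unsigned long bic(unsigned long x, unsigned long m)
	{
		return x & ~m;
	}

	int main(void)
	{
		unsigned long word = 0xff;
		int nr = 3;
		unsigned long mask = 1UL << (nr & 31);

		/* old: "and" with ~mask; new: "bic" with mask --
		   the same bit is cleared either way */
		assert((word & ~mask) == bic(word, mask));
		assert(bic(word, mask) == 0xf7);
		return 0;
	}
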
@@ -246,12 +247,15 @@ test_bit(int nr, volatile void * addr)
  */
 static inline unsigned long ffz_b(unsigned long x)
 {
-	unsigned long sum = 0;
+	unsigned long sum, x1, x2, x4;

 	x = ~x & -~x;		/* set first 0 bit, clear others */
-	if (x & 0xF0) sum += 4;
-	if (x & 0xCC) sum += 2;
-	if (x & 0xAA) sum += 1;
+	x1 = x & 0xAA;
+	x2 = x & 0xCC;
+	x4 = x & 0xF0;
+
+	sum = x2 ? 2 : 0;
+	sum += (x4 != 0) * 4;
+	sum += (x1 != 0);

 	return sum;
 }
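
The new ffz_b() computes the three byte masks up front and then combines flag values, replacing the dependent compare-and-add chain with tests the compiler can schedule independently. Both forms agree on every byte that contains a zero bit, which this stand-alone check exercises:

	#include <assert.h>

	static unsigned long ffz_b_old(unsigned long x)
	{
		unsigned long sum = 0;

		x = ~x & -~x;		/* set first 0 bit, clear others */
		if (x & 0xF0) sum += 4;
		if (x & 0xCC) sum += 2;
		if (x & 0xAA) sum += 1;
		return sum;
	}

	static unsigned long ffz_b_new(unsigned long x)
	{
		unsigned long sum, x1, x2, x4;

		x = ~x & -~x;		/* set first 0 bit, clear others */
		x1 = x & 0xAA;
		x2 = x & 0xCC;
		x4 = x & 0xF0;
		sum = x2 ? 2 : 0;
		sum += (x4 != 0) * 4;
		sum += (x1 != 0);
		return sum;
	}

	int main(void)
	{
		unsigned long x;

		/* every byte value with at least one zero bit */
		for (x = 0; x < 0xff; x++)
			assert(ffz_b_old(x) == ffz_b_new(x));
		return 0;
	}
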
@@ -268,7 +272,7 @@ static inline unsigned long ffz(unsigned long word)
 	__asm__("cmpbge %1,%2,%0" : "=r"(bits) : "r"(word), "r"(~0UL));
 	qofs = ffz_b(bits);
-	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
+	bits = __kernel_extbl(word, qofs);
 	bofs = ffz_b(bits);

 	return qofs*8 + bofs;
@@ -290,7 +294,7 @@ static inline unsigned long __ffs(unsigned long word)
 	__asm__("cmpbge $31,%1,%0" : "=r"(bits) : "r"(word));
 	qofs = ffz_b(bits);
-	__asm__("extbl %1,%2,%0" : "=r"(bits) : "r"(word), "r"(qofs));
+	bits = __kernel_extbl(word, qofs);
 	bofs = ffz_b(~bits);

 	return qofs*8 + bofs;
@@ -349,6 +353,14 @@ static inline unsigned long hweight64(unsigned long w)
 #define hweight16(x) hweight64((x) & 0xfffful)
 #define hweight8(x)  hweight64((x) & 0xfful)
 #else
+static inline unsigned long hweight64(unsigned long w)
+{
+	unsigned long result;
+	for (result = 0; w ; w >>= 1)
+		result += (w & 1);
+	return result;
+}
+
 #define hweight32(x) generic_hweight32(x)
 #define hweight16(x) generic_hweight16(x)
 #define hweight8(x)  generic_hweight8(x)
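
The #else branch now provides a plain C hweight64() fallback, one add per shifted-out bit, for CPUs without the hardware count path guarded on the other side of the conditional. A stand-alone check against GCC's builtin, assuming an LP64 target (as on Alpha, where unsigned long is 64 bits):

	#include <assert.h>

	/* The fallback added above: count bits one shift at a time. */
	static unsigned long hweight64(unsigned long w)
	{
		unsigned long result;
		for (result = 0; w ; w >>= 1)
			result += (w & 1);
		return result;
	}

	int main(void)
	{
		assert(hweight64(0) == 0);
		assert(hweight64(0x29) == 3);	/* 3 set bits -> 3 IPI targets
						   in smp_call_function_on_cpu() */
		assert(hweight64(~0UL) == 64);	/* assumes 64-bit unsigned long */
		assert(hweight64(12345) ==
		       (unsigned long)__builtin_popcountl(12345));
		return 0;
	}
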

include/asm-alpha/spinlock.h
@@ -38,12 +38,12 @@ typedef struct {
 #define spin_unlock_wait(x)	({ do { barrier(); } while ((x)->lock); })

 #if CONFIG_DEBUG_SPINLOCK
-extern void spin_unlock(spinlock_t * lock);
+extern void _raw_spin_unlock(spinlock_t * lock);
 extern void debug_spin_lock(spinlock_t * lock, const char *, int);
 extern int debug_spin_trylock(spinlock_t * lock, const char *, int);
-#define spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
-#define spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
+#define _raw_spin_lock(LOCK) debug_spin_lock(LOCK, __BASE_FILE__, __LINE__)
+#define _raw_spin_trylock(LOCK) debug_spin_trylock(LOCK, __BASE_FILE__, __LINE__)
 #define spin_lock_own(LOCK, LOCATION) \
 do { \
@@ -54,13 +54,13 @@ do { \
 	(LOCK)->lock ? "taken" : "freed", (LOCK)->on_cpu); \
 } while (0)
 #else
-static inline void spin_unlock(spinlock_t * lock)
+static inline void _raw_spin_unlock(spinlock_t * lock)
 {
 	mb();
 	lock->lock = 0;
 }

-static inline void spin_lock(spinlock_t * lock)
+static inline void _raw_spin_lock(spinlock_t * lock)
 {
 	long tmp;
@@ -83,7 +83,11 @@ static inline void spin_lock(spinlock_t * lock)
 	: "m"(lock->lock) : "memory");
 }

-#define spin_trylock(lock) (!test_and_set_bit(0,(lock)))
+static inline int _raw_spin_trylock(spinlock_t *lock)
+{
+	return !test_and_set_bit(0, &lock->lock);
+}

 #define spin_lock_own(LOCK, LOCATION)	((void)0)
 #endif /* CONFIG_DEBUG_SPINLOCK */
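
spin_trylock moves from a macro over test_and_set_bit() to a typed _raw_spin_trylock() inline that returns nonzero exactly when the lock was free and has now been taken. A user-space analog of those semantics using C11's atomic_flag (a sketch, not the kernel implementation):

	#include <stdatomic.h>
	#include <assert.h>

	typedef struct {
		atomic_flag lock;
	} sketch_spinlock_t;

	/* Nonzero iff the lock was free and we acquired it:
	   test-and-set returns the *old* value, hence the negation. */
	static int sketch_trylock(sketch_spinlock_t *l)
	{
		return !atomic_flag_test_and_set(&l->lock);
	}

	static void sketch_unlock(sketch_spinlock_t *l)
	{
		atomic_flag_clear(&l->lock);	/* like mb(); lock->lock = 0; */
	}

	int main(void)
	{
		sketch_spinlock_t l = { ATOMIC_FLAG_INIT };

		assert(sketch_trylock(&l));	/* first attempt succeeds */
		assert(!sketch_trylock(&l));	/* lock already held */
		sketch_unlock(&l);
		assert(sketch_trylock(&l));	/* free again */
		return 0;
	}
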
@@ -98,10 +102,10 @@ typedef struct {
 #define rwlock_init(x)	do { *(x) = RW_LOCK_UNLOCKED; } while(0)

 #if CONFIG_DEBUG_RWLOCK
-extern void write_lock(rwlock_t * lock);
-extern void read_lock(rwlock_t * lock);
+extern void _raw_write_lock(rwlock_t * lock);
+extern void _raw_read_lock(rwlock_t * lock);
 #else
-static inline void write_lock(rwlock_t * lock)
+static inline void _raw_write_lock(rwlock_t * lock)
 {
 	long regx;
@@ -121,7 +125,7 @@ static inline void write_lock(rwlock_t * lock)
 	: "0"(*(volatile int *)lock) : "memory");
 }

-static inline void read_lock(rwlock_t * lock)
+static inline void _raw_read_lock(rwlock_t * lock)
 {
 	long regx;
@@ -142,13 +146,13 @@ static inline void read_lock(rwlock_t * lock)
 }
 #endif /* CONFIG_DEBUG_RWLOCK */

-static inline void write_unlock(rwlock_t * lock)
+static inline void _raw_write_unlock(rwlock_t * lock)
 {
 	mb();
 	*(volatile int *)lock = 0;
 }

-static inline void read_unlock(rwlock_t * lock)
+static inline void _raw_read_unlock(rwlock_t * lock)
 {
 	long regx;
 	__asm__ __volatile__(