Commit 87e53587, authored Mar 04, 2003 by Peter Chubb, committed by David Mosberger, Mar 04, 2003
[PATCH] ia64: Preemption patch against ~2.5.60

Latest preemption patch.
parent 839fe15b

Showing 13 changed files with 204 additions and 54 deletions (+204 / -54)
arch/ia64/Kconfig                 +18  -0
arch/ia64/hp/sim/simserial.c      +21  -21
arch/ia64/ia32/ia32_support.c     +2   -1
arch/ia64/kernel/entry.S          +24  -1
arch/ia64/kernel/irq.c            +3   -1
arch/ia64/kernel/palinfo.c        +3   -1
arch/ia64/kernel/perfmon.c        +92  -15
arch/ia64/kernel/smp.c            +25  -3
arch/ia64/mm/fault.c              +1   -1
arch/ia64/mm/tlb.c                +7   -3
include/asm-ia64/hardirq.h        +5   -5
include/asm-ia64/system.h         +1   -1
include/asm-ia64/thread_info.h    +2   -1
arch/ia64/Kconfig

@@ -424,6 +424,18 @@ config SMP
 	  If you don't know what to do here, say N.
 
+config PREEMPT
+	bool "Preemptible Kernel"
+	help
+	  This option reduces the latency of the kernel when reacting to
+	  real-time or interactive events by allowing a low priority process to
+	  be preempted even if it is in kernel mode executing a system call.
+	  This allows applications to run more reliably even when the system is
+	  under load.
+
+	  Say Y here if you are building a kernel for a desktop, embedded
+	  or real-time system.  Say N if you are unsure.
+
 config IA32_SUPPORT
 	bool "Support running of Linux/x86 binaries"
 	help
@@ -875,6 +887,12 @@ config DEBUG_SPINLOCK
 	  best used in conjunction with the NMI watchdog so that spinlock
 	  deadlocks are also debuggable.
 
+config DEBUG_SPINLOCK_SLEEP
+	bool "Sleep-inside-spinlock checking"
+	help
+	  If you say Y here, various routines which may sleep will become very
+	  noisy if they are called with a spinlock held.
+
 config IA64_DEBUG_CMPXCHG
 	bool "Turn on compare-and-exchange bug checking (slow!)"
 	depends on DEBUG_KERNEL
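Why this option forces the rest of the patch: once the kernel is preemptible, any code that reads smp_processor_id() and then touches per-CPU state can be preempted and migrated to another CPU between the two steps. The recurring fix in the files below is get_cpu()/put_cpu(), which disables preemption across the window. A minimal sketch of the idiom (kernel-style C; demo_counter and the function names are illustrative, not part of this patch):

#include <linux/smp.h>		/* smp_processor_id(), get_cpu(), put_cpu() */
#include <linux/percpu.h>	/* DEFINE_PER_CPU(), per_cpu() */

static DEFINE_PER_CPU(int, demo_counter);	/* hypothetical per-CPU variable */

/* Racy under CONFIG_PREEMPT: the task may migrate right after the read. */
static void demo_racy(void)
{
	int cpu = smp_processor_id();
	per_cpu(demo_counter, cpu)++;	/* may now execute on a different CPU */
}

/* Safe: preemption is off from get_cpu() until the matching put_cpu(). */
static void demo_pinned(void)
{
	int cpu = get_cpu();
	per_cpu(demo_counter, cpu)++;
	put_cpu();
}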
arch/ia64/hp/sim/simserial.c

@@ -63,7 +63,6 @@ extern void ia64_ssc_connect_irq (long intr, long irq);
 static char *serial_name = "SimSerial driver";
 static char *serial_version = "0.6";
-static spinlock_t serial_lock = SPIN_LOCK_UNLOCKED;
 
 /*
  * This has been extracted from asm/serial.h. We need one eventually but
@@ -235,14 +234,14 @@ static void rs_put_char(struct tty_struct *tty, unsigned char ch)
 	if (!tty || !info->xmit.buf) return;
 
-	spin_lock_irqsave(&serial_lock, flags);
+	local_irq_save(flags);
 	if (CIRC_SPACE(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE) == 0) {
-		spin_unlock_irqrestore(&serial_lock, flags);
+		local_irq_restore(flags);
 		return;
 	}
 
 	info->xmit.buf[info->xmit.head] = ch;
 	info->xmit.head = (info->xmit.head + 1) & (SERIAL_XMIT_SIZE-1);
-	spin_unlock_irqrestore(&serial_lock, flags);
+	local_irq_restore(flags);
 }
 
 static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
@@ -250,7 +249,8 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
 	int count;
 	unsigned long flags;
 
-	spin_lock_irqsave(&serial_lock, flags);
+	local_irq_save(flags);
+
 	if (info->x_char) {
 		char c = info->x_char;
@@ -293,7 +293,7 @@ static _INLINE_ void transmit_chars(struct async_struct *info, int *intr_done)
 		info->xmit.tail += count;
 	}
 out:
-	spin_unlock_irqrestore(&serial_lock, flags);
+	local_irq_restore(flags);
 }
 
 static void rs_flush_chars(struct tty_struct *tty)
@@ -334,7 +334,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
 				break;
 			}
-			spin_lock_irqsave(&serial_lock, flags);
+			local_irq_save(flags);
 			{
 				c1 = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail,
 						       SERIAL_XMIT_SIZE);
@@ -344,7 +344,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
 				info->xmit.head = ((info->xmit.head + c) &
 						   (SERIAL_XMIT_SIZE-1));
 			}
-			spin_unlock_irqrestore(&serial_lock, flags);
+			local_irq_restore(flags);
 			buf += c;
 			count -= c;
@@ -352,7 +352,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
 		}
 		up(&tmp_buf_sem);
 	} else {
-		spin_lock_irqsave(&serial_lock, flags);
+		local_irq_save(flags);
 		while (1) {
 			c = CIRC_SPACE_TO_END(info->xmit.head, info->xmit.tail, SERIAL_XMIT_SIZE);
 			if (count < c)
@@ -367,7 +367,7 @@ static int rs_write(struct tty_struct * tty, int from_user,
 			count -= c;
 			ret += c;
 		}
-		spin_unlock_irqrestore(&serial_lock, flags);
+		local_irq_restore(flags);
 	}
 	/*
 	 * Hey, we transmit directly from here in our case
@@ -398,9 +398,9 @@ static void rs_flush_buffer(struct tty_struct *tty)
 	struct async_struct *info = (struct async_struct *) tty->driver_data;
 	unsigned long flags;
 
-	spin_lock_irqsave(&serial_lock, flags);
+	local_irq_save(flags);
 	info->xmit.head = info->xmit.tail = 0;
-	spin_unlock_irqrestore(&serial_lock, flags);
+	local_irq_restore(flags);
 
 	wake_up_interruptible(&tty->write_wait);
@@ -573,7 +573,7 @@ static void shutdown(struct async_struct * info)
 	       state->irq);
 #endif
 
-	spin_lock_irqsave(&serial_lock, flags);
+	local_irq_save(flags);
 	{
 		/*
 		 * First unlink the serial port from the IRQ chain...
@@ -611,7 +611,7 @@ static void shutdown(struct async_struct * info)
 		info->flags &= ~ASYNC_INITIALIZED;
 	}
-	spin_unlock_irqrestore(&serial_lock, flags);
+	local_irq_restore(flags);
 }
 
 /*
@@ -634,13 +634,13 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
 	state = info->state;
 
-	spin_lock_irqsave(&serial_lock, flags);
+	local_irq_save(flags);
 	if (tty_hung_up_p(filp)) {
 #ifdef SIMSERIAL_DEBUG
 		printk("rs_close: hung_up\n");
 #endif
 		MOD_DEC_USE_COUNT;
-		spin_unlock_irqrestore(&serial_lock, flags);
+		local_irq_restore(flags);
 		return;
 	}
 #ifdef SIMSERIAL_DEBUG
@@ -665,11 +665,11 @@ static void rs_close(struct tty_struct *tty, struct file * filp)
 	}
 	if (state->count) {
 		MOD_DEC_USE_COUNT;
-		spin_unlock_irqrestore(&serial_lock, flags);
+		local_irq_restore(flags);
 		return;
 	}
 	info->flags |= ASYNC_CLOSING;
-	spin_unlock_irqrestore(&serial_lock, flags);
+	local_irq_restore(flags);
 
 	/*
 	 * Now we wait for the transmit buffer to clear; and we notify
@@ -776,7 +776,7 @@ startup(struct async_struct *info)
 	if (!page)
 		return -ENOMEM;
 
-	spin_lock_irqsave(&serial_lock, flags);
+	local_irq_save(flags);
 
 	if (info->flags & ASYNC_INITIALIZED) {
 		free_page(page);
@@ -857,11 +857,11 @@ startup(struct async_struct *info)
 	}
 
 	info->flags |= ASYNC_INITIALIZED;
-	spin_unlock_irqrestore(&serial_lock, flags);
+	local_irq_restore(flags);
 	return 0;
 
 errout:
-	spin_unlock_irqrestore(&serial_lock, flags);
+	local_irq_restore(flags);
 	return retval;
 }
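A note on this conversion, since the commit message does not spell it out: serial_lock only ever protected driver-local state on the HP simulator, and every acquisition already disabled local interrupts, so plain local_irq_save()/local_irq_restore() appears to give the same exclusion without carrying a spinlock into the preemptible kernel. The resulting shape of each critical section (a sketch; in kernels of this era local_irq_save() comes via asm/system.h):

/* Interrupts off also means no preemption on this CPU, so the transmit
 * ring is safe from both the interrupt handler and a preempting task. */
static void touch_xmit_ring_sketch(void)
{
	unsigned long flags;

	local_irq_save(flags);
	/* ... update info->xmit.head / info->xmit.tail ... */
	local_irq_restore(flags);
}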
arch/ia64/ia32/ia32_support.c

@@ -93,7 +93,7 @@ ia32_load_state (struct task_struct *t)
 {
 	unsigned long eflag, fsr, fcr, fir, fdr, csd, ssd, tssd;
 	struct pt_regs *regs = ia64_task_regs(t);
-	int nr = smp_processor_id();	/* LDT and TSS depend on CPU number: */
+	int nr = get_cpu();	/* LDT and TSS depend on CPU number: */
 
 	eflag = t->thread.eflag;
 	fsr = t->thread.fsr;
@@ -119,6 +119,7 @@ ia32_load_state (struct task_struct *t)
 	regs->r17 = (_TSS(nr) << 48) | (_LDT(nr) << 32) | (__u32) regs->r17;
 	regs->r30 = load_desc(_LDT(nr));				/* LDTD */
+	put_cpu();
 }
 
 /*
arch/ia64/kernel/entry.S

@@ -586,10 +586,21 @@ GLOBAL_ENTRY(ia64_leave_kernel)
 	// work.need_resched etc. mustn't get changed by this CPU before it returns to
 	// user- or fsys-mode:
 (pUStk)	cmp.eq.unc p6,p0=r0,r0			// p6 <- pUStk
+#ifdef CONFIG_PREEMPT
+	rsm psr.i				// disable interrupts
+	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+	;;
+(pKStk)	ld4 r21=[r20]				// preempt_count -> r21
+	;;
+(pKStk)	cmp4.eq p6,p0=r21,r0			// p6 <- preempt_count == 0
+	;;
+#else // CONFIG_PREEMPT
 (pUStk)	rsm psr.i
 	;;
 (pUStk)	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
 	;;
+#endif // CONFIG_PREEMPT
 .work_processed:
 (p6)	ld4 r18=[r17]				// load current_thread_info()->flags
 	adds r2=PT(R8)+16,r12
@@ -810,15 +821,27 @@ skip_rbs_switch:
 .work_pending:
 	tbit.z p6,p0=r18,TIF_NEED_RESCHED	// current_thread_info()->need_resched==0?
 (p6)	br.cond.sptk.few .notify
+#ifdef CONFIG_PREEMPT
+(pKStk)	dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1
+	;;
+(pKStk)	st4 [r20]=r21
+	ssm psr.i				// enable interrupts
+#endif
 #if __GNUC__ < 3
 	br.call.spnt.many rp=invoke_schedule
 #else
 	br.call.spnt.many rp=schedule
 #endif
 .ret9:	cmp.eq p6,p0=r0,r0			// p6 <- 1
-	rsm psr.i
+	rsm psr.i				// disable interrupts
 	;;
 	adds r17=TI_FLAGS+IA64_TASK_SIZE,r13
+#if CONFIG_PREEMPT
+(pKStk)	adds r20=TI_PRE_COUNT+IA64_TASK_SIZE,r13
+	;;
+(pKStk)	st4 [r20]=r0				// preempt_count() <- 0
+#endif
 	br.cond.sptk.many .work_processed	// re-check
 
 .notify:
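In C terms, the new assembly implements roughly this exit-path logic: on return to kernel mode, the work flags are only examined when preempt_count is zero; before an involuntary schedule() the count is stamped with PREEMPT_ACTIVE so the scheduler knows this task is being preempted rather than blocking; afterwards the count is reset and the flags re-checked. A standalone C model (illustrative only; the TIF bit value is a placeholder, not the kernel's):

#define PREEMPT_ACTIVE_BIT	30
#define PREEMPT_ACTIVE		(1 << PREEMPT_ACTIVE_BIT)
#define TIF_NEED_RESCHED	(1 << 2)	/* placeholder bit for the model */

struct ti_model { int preempt_count; int flags; };

/* Model of ia64_leave_kernel's CONFIG_PREEMPT path. Interrupts are
 * disabled on entry (rsm psr.i in the real code). */
static void leave_kernel_model(struct ti_model *ti, int returning_to_kernel)
{
	if (returning_to_kernel && ti->preempt_count != 0)
		return;		/* preemption currently disabled: just leave */

	while (ti->flags & TIF_NEED_RESCHED) {
		if (returning_to_kernel)
			ti->preempt_count = PREEMPT_ACTIVE;	/* mark involuntary preemption */
		/* interrupts go back on (ssm psr.i) and schedule() runs here, */
		ti->flags &= ~TIF_NEED_RESCHED;			/* which clears the flag ... */
		/* ... then interrupts go off again (rsm psr.i) */
		if (returning_to_kernel)
			ti->preempt_count = 0;	/* preempt_count() <- 0, then re-check */
	}
}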
arch/ia64/kernel/irq.c

@@ -340,12 +340,14 @@ unsigned int do_IRQ(unsigned long irq, struct pt_regs *regs)
 	 * 0 return value means that this irq is already being
 	 * handled by some other CPU. (or is disabled)
 	 */
-	int cpu = smp_processor_id();
+	int cpu;
 	irq_desc_t *desc = irq_desc(irq);
 	struct irqaction * action;
 	unsigned int status;
 
 	irq_enter();
+	cpu = smp_processor_id();
+
 	kstat_cpu(cpu).irqs[irq]++;
 
 	if (desc->status & IRQ_PER_CPU) {
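The point of deferring the smp_processor_id() read is ordering: irq_enter() raises preempt_count by HARDIRQ_OFFSET, and only from then on is the handler marked atomic, so only then is the sampled CPU id unquestionably stable. As a declaration initializer, the read executed before irq_enter() ran. Whether migration could actually bite on this entry path is arguable; the reordering makes the code self-evidently safe. The rule, sketched in kernel-style C (handle_one_irq() is a stand-in, not a real function):

unsigned int do_irq_sketch(unsigned long irq)
{
	int cpu;

	irq_enter();			/* preempt_count += HARDIRQ_OFFSET */
	cpu = smp_processor_id();	/* stable until the matching irq_exit() */

	/* kstat_cpu(cpu).irqs[irq]++;  handle_one_irq(irq, cpu); */

	irq_exit();
	return 1;
}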
arch/ia64/kernel/palinfo.c

@@ -894,11 +894,13 @@ palinfo_read_entry(char *page, char **start, off_t off, int count, int *eof, void *data)
 	 * in SMP mode, we may need to call another CPU to get correct
 	 * information. PAL, by definition, is processor specific
 	 */
-	if (f->req_cpu == smp_processor_id())
+	if (f->req_cpu == get_cpu())
 		len = (*palinfo_entries[f->func_id].proc_read)(page);
 	else
 		len = palinfo_handle_smp(f, page);
 
+	put_cpu();
+
 	if (len <= off+count) *eof = 1;
 
 	*start = page + off;
arch/ia64/kernel/perfmon.c

(+92 -15; this diff is collapsed in the page view and not reproduced here.)
arch/ia64/kernel/smp.c

@@ -90,7 +90,7 @@ stop_this_cpu (void)
 void
 handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
 {
-	int this_cpu = smp_processor_id();
+	int this_cpu = get_cpu();
 	unsigned long *pending_ipis = &__get_cpu_var(ipi_operation);
 	unsigned long ops;
@@ -146,8 +146,12 @@ handle_IPI (int irq, void *dev_id, struct pt_regs *regs)
 		} while (ops);
 		mb();	/* Order data access and bit testing. */
 	}
+	put_cpu();
 }
 
+/*
+ * Called with preemption disabled
+ */
 static inline void
 send_IPI_single (int dest_cpu, int op)
 {
@@ -155,6 +159,9 @@ send_IPI_single (int dest_cpu, int op)
 	platform_send_ipi(dest_cpu, IA64_IPI_VECTOR, IA64_IPI_DM_INT, 0);
 }
 
+/*
+ * Called with preemption disabled
+ */
 static inline void
 send_IPI_allbutself (int op)
 {
@@ -166,6 +173,9 @@ send_IPI_allbutself (int op)
 	}
 }
 
+/*
+ * Called with preemption disabled
+ */
 static inline void
 send_IPI_all (int op)
 {
@@ -176,12 +186,18 @@ send_IPI_all (int op)
 		send_IPI_single(i, op);
 }
 
+/*
+ * Called with preemption disabled
+ */
 static inline void
 send_IPI_self (int op)
 {
 	send_IPI_single(smp_processor_id(), op);
 }
 
+/*
+ * Called with preemption disabled
+ */
 void
 smp_send_reschedule (int cpu)
 {
@@ -197,12 +213,15 @@ void
 smp_send_reschedule_all (void)
 {
 	int i;
+	int cpu = get_cpu(); /* disable preemption */
 
 	for (i = 0; i < NR_CPUS; i++)
-		if (cpu_online(i) && i != smp_processor_id())
+		if (cpu_online(i) && i != cpu)
 			smp_send_reschedule(i);
+	put_cpu();
 }
 
 void
 smp_flush_tlb_all (void)
 {
@@ -247,9 +266,11 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 {
 	struct call_data_struct data;
 	int cpus = 1;
+	int me = get_cpu(); /* prevent preemption and reschedule on another processor */
 
-	if (cpuid == smp_processor_id()) {
+	if (cpuid == me) {
 		printk("%s: trying to call self\n", __FUNCTION__);
+		put_cpu();
 		return -EBUSY;
 	}
@@ -276,6 +297,7 @@ smp_call_function_single (int cpuid, void (*func) (void *info), void *info, int
 	call_data = NULL;
 
 	spin_unlock_bh(&call_lock);
+	put_cpu();
 	return 0;
 }
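One discipline runs through these hunks: every get_cpu() is balanced by a put_cpu() on every exit path, including the early -EBUSY return in smp_call_function_single(). A missed put_cpu() would leave preemption disabled on that CPU indefinitely. The shape of the pattern, as a hypothetical kernel-style C helper:

#include <linux/smp.h>
#include <linux/errno.h>

/* Balanced pin/unpin, as in smp_call_function_single(): note the
 * put_cpu() on the error path before the early return. */
static int call_on_cpu_sketch(int cpuid)
{
	int me = get_cpu();	/* prevent preemption and migration */

	if (cpuid == me) {
		put_cpu();	/* unpin before bailing out */
		return -EBUSY;
	}

	/* ... set up call_data, send the IPI, wait for completion ... */

	put_cpu();
	return 0;
}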
arch/ia64/mm/fault.c

@@ -55,7 +55,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *regs)
 	/*
 	 * If we're in an interrupt or have no user context, we must not take the fault..
 	 */
-	if (in_interrupt() || !mm)
+	if (in_atomic() || !mm)
 		goto no_context;
 
 	down_read(&mm->mmap_sem);
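in_interrupt() tests only the hardirq/softirq bits of preempt_count, so it cannot see a preempt-disabled region; in_atomic() tests the whole count (under CONFIG_PREEMPT, modulo PREEMPT_ACTIVE and the big kernel lock; see the hardirq.h hunk below). Since the fault handler may sleep on mmap_sem, it must also bail out when preemption is merely disabled, hence the switch. A simplified model of the two predicates using the new layout, ignoring the BKL term:

#define SOFTIRQ_MASK	0x0000ff00UL
#define HARDIRQ_MASK	0x3fff0000UL

#define in_interrupt_model(cnt)	(((cnt) & (HARDIRQ_MASK | SOFTIRQ_MASK)) != 0)
#define in_atomic_model(cnt)	((cnt) != 0)

/* With preempt_count == 1 (e.g. inside get_cpu()):
 *   in_interrupt_model(1) == 0  -- the old check would wrongly take the fault
 *   in_atomic_model(1)    != 0  -- the new check correctly goes to no_context
 */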
arch/ia64/mm/tlb.c

@@ -81,9 +81,13 @@ wrap_mmu_context (struct mm_struct *mm)
 	}
 	read_unlock(&tasklist_lock);
 	/* can't call flush_tlb_all() here because of race condition with O(1) scheduler [EF] */
-	for (i = 0; i < NR_CPUS; ++i)
-		if (i != smp_processor_id())
-			per_cpu(ia64_need_tlb_flush, i) = 1;
+	{
+		int cpu = get_cpu(); /* prevent preemption/migration */
+		for (i = 0; i < NR_CPUS; ++i)
+			if (i != cpu)
+				per_cpu(ia64_need_tlb_flush, i) = 1;
+		put_cpu();
+	}
 	local_flush_tlb_all();
 }
include/asm-ia64/hardirq.h

@@ -32,18 +32,18 @@
  *
  * - bits 0-7 are the preemption count (max preemption depth: 256)
  * - bits 8-15 are the softirq count (max # of softirqs: 256)
- * - bits 16-31 are the hardirq count (max # of hardirqs: 65536)
+ * - bits 16-29 are the hardirq count (max # of hardirqs: 16384)
  *
  * - (bit 63 is the PREEMPT_ACTIVE flag---not currently implemented.)
  *
  * PREEMPT_MASK: 0x000000ff
  * SOFTIRQ_MASK: 0x0000ff00
- * HARDIRQ_MASK: 0xffff0000
+ * HARDIRQ_MASK: 0x3fff0000
  */
 
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
-#define HARDIRQ_BITS	16
+#define HARDIRQ_BITS	14
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
@@ -83,13 +83,13 @@
 #define hardirq_trylock()	(!in_interrupt())
 #define hardirq_endlock()	do { } while (0)
 
-#define in_atomic()		(preempt_count() != 0)
 #define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
 
 #if CONFIG_PREEMPT
-# error CONFIG_PREEMT currently not supported.
+# define in_atomic()	((preempt_count() & ~PREEMPT_ACTIVE) != kernel_locked())
 # define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
 #else
+# define in_atomic()	(preempt_count() != 0)
 # define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
 #endif
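The hardirq field shrinks by two bits so the counts stay clear of bit 30, which the thread_info.h hunk below claims for PREEMPT_ACTIVE. A quick standalone check of the new layout (ordinary userspace C):

#include <assert.h>

#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	14
#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define FIELD_MASK(bits, shift)	(((1UL << (bits)) - 1) << (shift))

int main(void)
{
	assert(FIELD_MASK(PREEMPT_BITS, PREEMPT_SHIFT) == 0x000000ffUL);
	assert(FIELD_MASK(SOFTIRQ_BITS, SOFTIRQ_SHIFT) == 0x0000ff00UL);
	assert(FIELD_MASK(HARDIRQ_BITS, HARDIRQ_SHIFT) == 0x3fff0000UL);
	/* top hardirq bit is 16 + 14 - 1 = 29, leaving bit 30 free */
	return 0;
}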
include/asm-ia64/system.h

@@ -206,7 +206,7 @@ extern void ia64_load_extra (struct task_struct *task);
 #ifdef CONFIG_PERFMON
   DECLARE_PER_CPU(unsigned long, pfm_syst_info);
-# define PERFMON_IS_SYSWIDE() (get_cpu_var(pfm_syst_info) & 0x1)
+# define PERFMON_IS_SYSWIDE() (__get_cpu_var(pfm_syst_info) & 0x1)
 #else
 # define PERFMON_IS_SYSWIDE() (0)
 #endif
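This two-underscore change matters more than it looks: get_cpu_var() disables preemption and must be paired with put_cpu_var(), so using it inside a bare expression macro would leave preemption disabled at every call site. __get_cpu_var() is a plain access, relying on the caller already running with preemption off. The distinction, sketched in kernel-style C (demo_info is a hypothetical variable):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned long, demo_info);

/* get_cpu_var() pins the task; it must be paired with put_cpu_var(). */
static unsigned long read_pinned(void)
{
	unsigned long v = get_cpu_var(demo_info);	/* preempt_disable() inside */
	put_cpu_var(demo_info);				/* preempt_enable() */
	return v;
}

/* __get_cpu_var() assumes the caller already cannot migrate
 * (interrupts or preemption disabled): */
static unsigned long read_unpinned(void)
{
	return __get_cpu_var(demo_info);
}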
include/asm-ia64/thread_info.h

@@ -15,7 +15,8 @@
 #define TI_ADDR_LIMIT	0x10
 #define TI_PRE_COUNT	0x18
 
-#define PREEMPT_ACTIVE	0x4000000
+#define PREEMPT_ACTIVE_BIT 30
+#define PREEMPT_ACTIVE	(1 << PREEMPT_ACTIVE_BIT)
 
 #ifndef __ASSEMBLY__
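Two things change here: the bit gets a symbolic name so the entry.S code (dep r21=-1,r0,PREEMPT_ACTIVE_BIT,1) and the C definition cannot drift apart, and the value itself moves. The old literal 0x4000000 is 1 << 26, which fell inside the hardirq count field (bits 16-29 after this patch, 16-31 before it); 1 << 30 sits just above it. A two-line check:

#include <assert.h>

int main(void)
{
	assert(0x4000000UL == (1UL << 26));		/* old value: inside the hardirq field */
	assert(((1UL << 30) & 0x3fff0000UL) == 0);	/* new value: clear of HARDIRQ_MASK */
	return 0;
}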