nexedi / linux / Commits / 8e70b6f7

Commit 8e70b6f7, authored Jul 27, 2002 by Paul Mackerras

    Merge samba.org:/home/paulus/kernel/linux-2.5
    into samba.org:/home/paulus/kernel/for-linus-ppc

Parents: b0ab8396, 889d64d5

Showing 14 changed files with 91 additions and 439 deletions (+91 -439)
arch/ppc/kernel/irq.c                +9   -279
arch/ppc/kernel/ppc_ksyms.c          +0     -6
arch/ppc/kernel/temp.c               +7     -7
arch/ppc/kernel/time.c               +7    -10
arch/ppc/mm/mmu_decl.h               +3     -1
arch/ppc/mm/pgtable.c                +7    -24
arch/ppc/mm/ppc_mmu.c                +7     -1
arch/ppc/platforms/iSeries_pic.c     +2     -2
arch/ppc/platforms/iSeries_time.c    +2     -2
include/asm-ppc/hardirq.h            +9    -76
include/asm-ppc/pgtable.h           +17     -0
include/asm-ppc/smplock.h            +1     -1
include/asm-ppc/softirq.h           +17    -19
include/asm-ppc/system.h             +3    -11
arch/ppc/kernel/irq.c

@@ -178,15 +178,6 @@ setup_irq(unsigned int irq, struct irqaction * new)
 	return 0;
 }
 
-#if (defined(CONFIG_8xx) || defined(CONFIG_8260))
-/* Name change so we can catch standard drivers that potentially mess up
- * the internal interrupt controller on 8xx and 8260. Just bear with me,
- * I don't like this either and I am searching a better solution. For
- * now, this is what I need. -- Dan
- */
-#define request_irq	request_8xxirq
-#endif
-
 void free_irq(unsigned int irq, void *dev_id)
 {
 	irq_desc_t *desc;
@@ -212,11 +203,7 @@ void free_irq(unsigned int irq, void* dev_id)
 	}
 	spin_unlock_irqrestore(&desc->lock, flags);
 
-#ifdef CONFIG_SMP
-	/* Wait to make sure it's not being used on another CPU */
-	while (desc->status & IRQ_INPROGRESS)
-		barrier();
-#endif
+	synchronize_irq(irq);
 	irq_kfree(action);
 	return;
 }
@@ -289,8 +276,8 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *)
  *
  * This function may be called from IRQ context.
  */
-void disable_irq_nosync(unsigned int irq)
+void disable_irq_nosync(unsigned int irq)
 {
 	irq_desc_t *desc = irq_desc + irq;
 	unsigned long flags;
@@ -320,12 +307,7 @@ int request_irq(unsigned int irq, void (*handler)(int, void *, struct pt_regs *)
 void disable_irq(unsigned int irq)
 {
 	disable_irq_nosync(irq);
-	if (!local_irq_count(smp_processor_id())) {
-		do {
-			barrier();
-		} while (irq_desc[irq].status & IRQ_INPROGRESS);
-	}
+	synchronize_irq(irq);
 }
 
 /**
@@ -529,7 +511,7 @@ int do_IRQ(struct pt_regs *regs)
 {
 	int cpu = smp_processor_id();
 	int irq, first = 1;
 
-	hardirq_enter(cpu);
+	irq_enter();
 
 	/*
 	 * Every platform is required to implement ppc_md.get_irq.
@@ -546,7 +528,7 @@ int do_IRQ(struct pt_regs *regs)
 	if (irq != -2 && first)
 		/* That's not SMP safe ... but who cares ? */
 		ppc_spurious_interrupts++;
-	hardirq_exit(cpu);
+	irq_exit();
 	if (softirq_pending(cpu))
 		do_softirq();
@@ -582,262 +564,10 @@ void __init init_IRQ(void)
 }
 
 #ifdef CONFIG_SMP
-unsigned char global_irq_holder = NO_PROC_ID;
-unsigned volatile long global_irq_lock; /* pendantic :long for set_bit--RR*/
-atomic_t global_bh_count;
-
-static void show(char * str)
-{
-	int cpu = smp_processor_id();
-
-	printk("\n%s, CPU %d:\n", str, cpu);
-	printk("irq: [%d %d]\n", local_irq_count(0), local_irq_count(1));
-	printk("bh: %d [%d %d]\n", atomic_read(&global_bh_count),
-	       local_bh_count(0), local_bh_count(1));
-}
-
-static inline void wait_on_bh(void)
-{
-	int count = MAXCOUNT;
-	do {
-		if (!--count) {
-			show("wait_on_bh");
-			count = ~0;
-		}
-		/* nothing .. wait for the other bh's to go away */
-	} while (atomic_read(&global_bh_count) != 0);
-}
-
-static inline void wait_on_irq(int cpu)
-{
-	int count = MAXCOUNT;
-
-	for (;;) {
-		/*
-		 * Wait until all interrupts are gone. Wait
-		 * for bottom half handlers unless we're
-		 * already executing in one..
-		 */
-		if (!irqs_running())
-			if (local_bh_count(cpu) || !spin_is_locked(&global_bh_lock))
-				break;
-
-		/* Duh, we have to loop. Release the lock to avoid deadlocks */
-		clear_bit(0, &global_irq_lock);
-
-		for (;;) {
-			if (!--count) {
-				show("wait_on_irq");
-				count = ~0;
-			}
-			local_irq_enable();
-			/*
-			 * We have to allow irqs to arrive between
-			 * local_irq_enable and local_irq_disable.
-			 * Some cpus apparently won't cause the interrupt
-			 * for several instructions. We hope that isync will
-			 * catch this --Troy
-			 */
-			__asm__ __volatile__ ("isync");
-			local_irq_disable();
-			if (irqs_running())
-				continue;
-			if (global_irq_lock)
-				continue;
-			if (!local_bh_count(cpu) && spin_is_locked(&global_bh_lock))
-				continue;
-			if (!test_and_set_bit(0, &global_irq_lock))
-				break;
-		}
-	}
-}
-
-/*
- * This is called when we want to synchronize with
- * bottom half handlers. We need to wait until
- * no other CPU is executing any bottom half handler.
- *
- * Don't wait if we're already running in an interrupt
- * context or are inside a bh handler.
- */
-void synchronize_bh(void)
-{
-	if (atomic_read(&global_bh_count) && !in_interrupt())
-		wait_on_bh();
-}
-
-/*
- * This is called when we want to synchronize with
- * interrupts. We may for example tell a device to
- * stop sending interrupts: but to make sure there
- * are no interrupts that are executing on another
- * CPU we need to call this function.
- */
-void synchronize_irq(void)
-{
-	if (irqs_running()) {
-		/* Stupid approach */
-		cli();
-		sti();
-	}
-}
-
-static inline void get_irqlock(int cpu)
+void synchronize_irq(unsigned int irq)
 {
-	unsigned int loops = MAXCOUNT;
-
-	if (test_and_set_bit(0, &global_irq_lock)) {
-		/* do we already hold the lock? */
-		if ((unsigned char) cpu == global_irq_holder)
-			return;
-		/* Uhhuh.. Somebody else got it. Wait.. */
-		do {
-			do {
-				if (loops-- == 0) {
-					printk("get_irqlock(%d) waiting, global_irq_holder=%d\n",
-					       cpu, global_irq_holder);
-#ifdef CONFIG_XMON
-					xmon(0);
-#endif
-				}
-			} while (test_bit(0, &global_irq_lock));
-		} while (test_and_set_bit(0, &global_irq_lock));
-	}
-	/*
-	 * We also need to make sure that nobody else is running
-	 * in an interrupt context.
-	 */
-	wait_on_irq(cpu);
-
-	/*
-	 * Ok, finally..
-	 */
-	global_irq_holder = cpu;
-}
-
-/*
- * A global "cli()" while in an interrupt context
- * turns into just a local cli(). Interrupts
- * should use spinlocks for the (very unlikely)
- * case that they ever want to protect against
- * each other.
- *
- * If we already have local interrupts disabled,
- * this will not turn a local disable into a
- * global one (problems with spinlocks: this makes
- * save_flags+cli+sti usable inside a spinlock).
- */
-void __global_cli(void)
-{
-	unsigned long flags;
-
-	local_save_flags(flags);
-	if (flags & (1 << 15)) {
-		int cpu = smp_processor_id();
-		local_irq_disable();
-		if (!local_irq_count(cpu))
-			get_irqlock(cpu);
-	}
-}
-
-void __global_sti(void)
-{
-	int cpu = smp_processor_id();
-
-	if (!local_irq_count(cpu))
-		release_irqlock(cpu);
-	local_irq_enable();
-}
-
-/*
- * SMP flags value to restore to:
- * 0 - global cli
- * 1 - global sti
- * 2 - local cli
- * 3 - local sti
- */
-unsigned long __global_save_flags(void)
-{
-	int retval;
-	int local_enabled;
-	unsigned long flags;
-
-	local_save_flags(flags);
-	local_enabled = (flags >> 15) & 1;
-	/* default to local */
-	retval = 2 + local_enabled;
-
-	/* check for global flags if we're not in an interrupt */
-	if (!local_irq_count(smp_processor_id())) {
-		if (local_enabled)
-			retval = 1;
-		if (global_irq_holder == (unsigned char) smp_processor_id())
-			retval = 0;
-	}
-	return retval;
-}
-
-int tb(long vals[], int max_size)
-{
-	register unsigned long *orig_sp __asm__ ("r1");
-	register unsigned long lr __asm__ ("r3");
-	unsigned long *sp;
-	int i;
-
-	asm volatile ("mflr 3");
-	vals[0] = lr;
-	sp = (unsigned long *) *orig_sp;
-	sp = (unsigned long *) *sp;
-	for (i = 1; i < max_size; i++) {
-		if (sp == 0) {
-			break;
-		}
-		vals[i] = *(sp + 1);
-		sp = (unsigned long *) *sp;
-	}
-	return i;
-}
-
-void __global_restore_flags(unsigned long flags)
-{
-	switch (flags) {
-	case 0:
-		__global_cli();
-		break;
-	case 1:
-		__global_sti();
-		break;
-	case 2:
-		local_irq_disable();
-		break;
-	case 3:
-		local_irq_enable();
-		break;
-	default:
-	{
-		unsigned long trace[5];
-		int count;
-		int i;
-
-		printk("global_restore_flags: %08lx (%08lx)\n",
-		       flags, (&flags)[-1]);
-		count = tb(trace, 5);
-		printk("tb:");
-		for (i = 0; i < count; i++) {
-			printk(" %8.8lx", trace[i]);
-		}
-		printk("\n");
-	}
-	}
+	while (irq_desc[irq].status & IRQ_INPROGRESS)
+		barrier();
 }
 #endif /* CONFIG_SMP */
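Editor's note: the heart of this file's change is that synchronize_irq() stops meaning "wait until no CPU is in interrupt context" and starts meaning "wait until this one IRQ's handler is no longer running anywhere". Below is a minimal standalone model of the new polling semantics, for illustration only: the struct, the IRQ_INPROGRESS value and the barrier() macro are stand-ins, not the kernel's definitions.

#include <stdio.h>

#define NR_IRQS        16
#define IRQ_INPROGRESS 1	/* modelled "handler currently running" bit */

struct irq_desc_model { volatile unsigned int status; };
static struct irq_desc_model irq_desc[NR_IRQS];

/* Compiler barrier, so the status re-read is not optimized away. */
#define barrier() __asm__ __volatile__("" ::: "memory")

/* Spin until no handler for this one irq is running on any CPU. */
static void synchronize_irq_model(unsigned int irq)
{
	while (irq_desc[irq].status & IRQ_INPROGRESS)
		barrier();
}

int main(void)
{
	irq_desc[5].status = 0;		/* handler idle */
	synchronize_irq_model(5);	/* returns immediately */
	printf("irq 5 quiesced\n");
	return 0;
}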
arch/ppc/kernel/ppc_ksyms.c

@@ -207,12 +207,6 @@ EXPORT_SYMBOL(last_task_used_altivec);
 EXPORT_SYMBOL(giveup_altivec);
 #endif /* CONFIG_ALTIVEC */
 #ifdef CONFIG_SMP
-EXPORT_SYMBOL(global_irq_lock);
-EXPORT_SYMBOL(global_irq_holder);
-EXPORT_SYMBOL(__global_cli);
-EXPORT_SYMBOL(__global_sti);
-EXPORT_SYMBOL(__global_save_flags);
-EXPORT_SYMBOL(__global_restore_flags);
 #ifdef CONFIG_DEBUG_SPINLOCK
 EXPORT_SYMBOL(_raw_spin_lock);
 EXPORT_SYMBOL(_raw_spin_unlock);
arch/ppc/kernel/temp.c

@@ -124,27 +124,27 @@ void TAUupdate(int cpu)
 void TAUException(struct pt_regs * regs)
 {
-	unsigned long cpu = smp_processor_id();
+	int cpu = smp_processor_id();
 
-	hardirq_enter(cpu);
+	irq_enter();
 	tau[cpu].interrupts++;
 
 	TAUupdate(cpu);
 
-	hardirq_exit(cpu);
-	return;
+	irq_exit();
 }
 #endif /* CONFIG_TAU_INT */
 
 static void tau_timeout(void * info)
 {
-	unsigned long cpu = smp_processor_id();
+	int cpu;
 	unsigned long flags;
 	int size;
 	int shrink;
 
 	/* disabling interrupts *should* be okay */
-	save_flags(flags); cli();
+	local_irq_save(flags);
+	cpu = smp_processor_id();
 
 #ifndef CONFIG_TAU_INT
 	TAUupdate(cpu);
@@ -186,7 +186,7 @@ static void tau_timeout(void * info)
 	 */
 	mtspr(SPRN_THRM3, THRM3_SITV(500*60) | THRM3_E);
 
-	restore_flags(flags);
+	local_irq_restore(flags);
 }
 
 static void tau_timeout_smp(unsigned long unused)
arch/ppc/kernel/time.c

@@ -75,7 +75,7 @@ u64 jiffies_64;
 unsigned long disarm_decr[NR_CPUS];
 
-extern int do_sys_settimeofday(struct timeval *tv, struct timezone *tz);
+extern struct timezone sys_tz;
 
 /* keep track of when we need to update the rtc */
 time_t last_rtc_update;
@@ -161,7 +161,7 @@ int timer_interrupt(struct pt_regs * regs)
 	if (atomic_read(&ppc_n_lost_interrupts) != 0)
 		do_IRQ(regs);
 
-	hardirq_enter(cpu);
+	irq_enter();
 
 	while ((next_dec = tb_ticks_per_jiffy - tb_delta(&jiffy_stamp)) < 0) {
 		jiffy_stamp += tb_ticks_per_jiffy;
@@ -214,7 +214,7 @@ int timer_interrupt(struct pt_regs * regs)
 	if (ppc_md.heartbeat && !ppc_md.heartbeat_count--)
 		ppc_md.heartbeat();
 
-	hardirq_exit(cpu);
+	irq_exit();
 
 	if (softirq_pending(cpu))
 		do_softirq();
@@ -358,14 +358,11 @@ void __init time_init(void)
 	/* Not exact, but the timer interrupt takes care of this */
 	set_dec(tb_ticks_per_jiffy);
 
-	/* If platform provided a timezone (pmac), we correct the time
-	 * using do_sys_settimeofday() which in turn calls warp_clock()
-	 */
+	/* If platform provided a timezone (pmac), we correct the time */
 	if (time_offset) {
-		struct timezone tz;
-		tz.tz_minuteswest = -time_offset / 60;
-		tz.tz_dsttime = 0;
-		do_sys_settimeofday(NULL, &tz);
+		sys_tz.tz_minuteswest = -time_offset / 60;
+		sys_tz.tz_dsttime = 0;
+		xtime.tv_sec -= time_offset;
 	}
 }
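Editor's note: in the time_init() change above, instead of going through do_sys_settimeofday(), the code now writes sys_tz directly and warps xtime itself. A hypothetical worked example of the sign convention, assuming a platform that reports an offset of -18000 seconds (UTC-5); the variable names and values here are illustrative, not taken from a real machine:

#include <stdio.h>

int main(void)
{
	long time_offset = -5 * 3600;	/* hypothetical pmac offset: UTC-5 */
	long tz_minuteswest = -time_offset / 60;
	long xtime_sec = 1000000000;	/* pretend RTC reading, local time */

	/* Local wall time is 5 hours behind UTC, so add 18000 seconds. */
	xtime_sec -= time_offset;

	printf("tz_minuteswest = %ld\n", tz_minuteswest);	/* prints 300 */
	printf("corrected xtime = %ld\n", xtime_sec);		/* +18000 */
	return 0;
}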
arch/ppc/mm/mmu_decl.h

@@ -25,7 +25,6 @@
 #include <asm/tlbflush.h>
 
 extern void mapin_ram(void);
-extern void bat_mapin_ram(void);
 extern int map_page(unsigned long va, unsigned long pa, int flags);
 extern void setbat(int index, unsigned long virt, unsigned long phys,
 		   unsigned int size, int flags);
@@ -49,14 +48,17 @@ extern unsigned long Hash_size, Hash_mask;
 #if defined(CONFIG_8xx)
 #define flush_HPTE(X, va, pg)	_tlbie(va)
 #define MMU_init_hw()		do { } while(0)
+#define mmu_mapin_ram()		(0UL)
 
 #elif defined(CONFIG_4xx)
 #define flush_HPTE(X, va, pg)	_tlbie(va)
 extern void MMU_init_hw(void);
+#define mmu_mapin_ram()		(0UL)
 
 #else
 /* anything except 4xx or 8xx */
 extern void MMU_init_hw(void);
+extern unsigned long mmu_mapin_ram(void);
 
 /* Be careful....this needs to be updated if we ever encounter 603 SMPs,
  * which includes all new 82xx processors. We need tlbie/tlbsync here
arch/ppc/mm/pgtable.c

@@ -252,31 +252,14 @@ void __init mapin_ram(void)
 {
 	unsigned long v, p, s, f;
 
-#ifdef HAVE_BATS
-	if (!__map_without_bats)
-		bat_mapin_ram();
-#endif /* HAVE_BATS */
-
-	v = KERNELBASE;
-	p = PPC_MEMSTART;
-	for (s = 0; s < total_lowmem; s += PAGE_SIZE) {
-		/* On the MPC8xx, we want the page shared so we
-		 * don't get ASID compares on kernel space.
-		 */
-		f = _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_SHARED | _PAGE_HWEXEC;
-#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
-		/* Allows stub to set breakpoints everywhere */
-		f |= _PAGE_WRENABLE;
-#else	/* !CONFIG_KGDB && !CONFIG_XMON */
-		if ((char *) v < _stext || (char *) v >= etext)
-			f |= _PAGE_WRENABLE;
-#ifdef CONFIG_PPC_STD_MMU
-		else
-			/* On the powerpc (not all), no user access
-			   forces R/W kernel access */
-			f |= _PAGE_USER;
-#endif /* CONFIG_PPC_STD_MMU */
-#endif /* CONFIG_KGDB || CONFIG_XMON */
+	s = mmu_mapin_ram();
+	v = KERNELBASE + s;
+	p = PPC_MEMSTART + s;
+	for (; s < total_lowmem; s += PAGE_SIZE) {
+		if ((char *) v >= _stext && (char *) v < etext)
+			f = _PAGE_RAM_TEXT;
+		else
+			f = _PAGE_RAM;
 		map_page(v, p, f);
 		v += PAGE_SIZE;
 		p += PAGE_SIZE;
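Editor's note: the restructured loop no longer starts at zero. It asks mmu_mapin_ram() how many bytes the BATs already cover, then maps only the remainder page by page, picking the write-protected flag for the kernel text range. A toy model of that control flow; the addresses, sizes and flag values below are invented for the sketch:

#include <stdio.h>

#define PAGE_SIZE	0x1000UL
#define KERNELBASE	0xc0000000UL
#define PPC_MEMSTART	0x00000000UL
#define _PAGE_RAM	1	/* invented flag values */
#define _PAGE_RAM_TEXT	2

static unsigned long total_lowmem = 8 * PAGE_SIZE;
static unsigned long _stext = KERNELBASE + 2 * PAGE_SIZE;
static unsigned long etext  = KERNELBASE + 4 * PAGE_SIZE;

/* Pretend the BATs already cover the first two pages. */
static unsigned long mmu_mapin_ram(void) { return 2 * PAGE_SIZE; }

static void map_page(unsigned long v, unsigned long p, int f)
{
	printf("map %#lx -> %#lx flags %d\n", v, p, f);
}

int main(void)
{
	unsigned long v, p, s;
	int f;

	s = mmu_mapin_ram();		/* skip what the BATs mapped */
	v = KERNELBASE + s;
	p = PPC_MEMSTART + s;
	for (; s < total_lowmem; s += PAGE_SIZE) {
		if (v >= _stext && v < etext)
			f = _PAGE_RAM_TEXT;	/* kernel text: read-only */
		else
			f = _PAGE_RAM;
		map_page(v, p, f);
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
	return 0;
}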
arch/ppc/mm/ppc_mmu.c

@@ -87,12 +87,15 @@ unsigned long p_mapped_by_bats(unsigned long pa)
 	return 0;
 }
 
-void __init bat_mapin_ram(void)
+unsigned long __init mmu_mapin_ram(void)
 {
 	unsigned long tot, bl, done;
 	unsigned long max_size = (256<<20);
 	unsigned long align;
 
+	if (__map_without_bats)
+		return 0;
+
 	/* Set up BAT2 and if necessary BAT3 to cover RAM. */
 
 	/* Make sure we don't map a block larger than the
@@ -119,7 +122,10 @@ void __init bat_mapin_ram(void)
 			break;
 		setbat(3, KERNELBASE+done, PPC_MEMSTART+done, bl, _PAGE_KERNEL);
 		done = (unsigned long)bat_addrs[3].limit - KERNELBASE + 1;
 	}
+
+	return done;
 }
 
 /*
arch/ppc/platforms/iSeries_pic.c

@@ -52,7 +52,7 @@ int do_IRQ(struct pt_regs *regs)
 	if (is_soft_enabled())
 		BUG();
 
-	hardirq_enter(cpu);
+	irq_enter();
 
 	paca = (struct Paca *)mfspr(SPRG1);
@@ -71,7 +71,7 @@ int do_IRQ(struct pt_regs *regs)
 		local_irq_restore(flags);
 	}
 
-	hardirq_exit(cpu);
+	irq_exit();
 
 	if (paca->xLpPacaPtr->xDecrInt) {
 		paca->xLpPacaPtr->xDecrInt = 0;
arch/ppc/platforms/iSeries_time.c

@@ -117,7 +117,7 @@ int timer_interrupt(struct pt_regs * regs)
 	else
 		timerRetDisabled++;
 
-	hardirq_enter(cpu);
+	irq_enter();
 
 	if (!user_mode(regs))
 		ppc_do_profile(instruction_pointer(regs));
@@ -149,7 +149,7 @@ int timer_interrupt(struct pt_regs * regs)
 	paca->xLpPacaPtr->xDecrInt = 0;
 	set_dec((unsigned)next_dec);
 
-	hardirq_exit(cpu);
+	irq_exit();
 
 	if (softirq_pending(cpu))
 		do_softirq();
include/asm-ppc/hardirq.h

@@ -15,8 +15,6 @@
 */
 typedef struct {
 	unsigned long __softirq_pending;	/* set_bit is used on this */
-	unsigned int __local_irq_count;
-	unsigned int __local_bh_count;
 	unsigned int __syscall_count;
 	struct task_struct * __ksoftirqd_task;
 	unsigned int __last_jiffy_stamp;
@@ -25,89 +23,24 @@ typedef struct {
 #include <linux/irq_cpustat.h>	/* Standard mappings for irq_cpustat_t above */
 
 #define last_jiffy_stamp(cpu) __IRQ_STAT((cpu), __last_jiffy_stamp)
 
+#define IRQ_OFFSET	64
+
 /*
  * Are we in an interrupt context? Either doing bottom half
  * or hardware interrupt processing?
  */
-#define in_interrupt()	({ int __cpu = smp_processor_id(); \
-	(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
+#define in_interrupt()	((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
 
-#define in_irq()	(local_irq_count(smp_processor_id()) != 0)
+#define in_irq		in_interrupt
+
+#define irq_enter()	(preempt_count() += IRQ_OFFSET)
+#define irq_exit()	(preempt_count() -= IRQ_OFFSET)
 
 #ifndef CONFIG_SMP
 
-#define hardirq_trylock(cpu)	(local_irq_count(cpu) == 0)
-#define hardirq_endlock(cpu)	do { } while (0)
-
-#define hardirq_enter(cpu)	do { preempt_disable(); local_irq_count(cpu)++; } while (0)
-#define hardirq_exit(cpu)	do { local_irq_count(cpu)--; preempt_enable(); } while (0)
-
-#define synchronize_irq()	do { } while (0)
-#define release_irqlock(cpu)	do { } while (0)
+#define synchronize_irq(irq)	barrier()
 
 #else /* CONFIG_SMP */
 
-#include <asm/atomic.h>
-
-extern unsigned char global_irq_holder;
-extern unsigned volatile long global_irq_lock;
-
-static inline int irqs_running(void)
-{
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++)
-		if (local_irq_count(i))
-			return 1;
-	return 0;
-}
-
-static inline void release_irqlock(int cpu)
-{
-	/* if we didn't own the irq lock, just ignore.. */
-	if (global_irq_holder == (unsigned char) cpu) {
-		global_irq_holder = NO_PROC_ID;
-		clear_bit(0, &global_irq_lock);
-	}
-}
-
-static inline void hardirq_enter(int cpu)
-{
-	unsigned int loops = 10000000;
-
-	preempt_disable();
-	++local_irq_count(cpu);
-	while (test_bit(0, &global_irq_lock)) {
-		if (cpu == global_irq_holder) {
-			printk("uh oh, interrupt while we hold global irq lock! (CPU %d)\n",
-			       cpu);
-#ifdef CONFIG_XMON
-			xmon(0);
-#endif
-			break;
-		}
-		if (loops-- == 0) {
-			printk("do_IRQ waiting for irq lock (holder=%d)\n",
-			       global_irq_holder);
-#ifdef CONFIG_XMON
-			xmon(0);
-#endif
-		}
-	}
-}
-
-static inline void hardirq_exit(int cpu)
-{
-	--local_irq_count(cpu);
-	preempt_enable();
-}
-
-static inline int hardirq_trylock(int cpu)
-{
-	return !test_bit(0, &global_irq_lock);
-}
-
-#define hardirq_endlock(cpu)	do { } while (0)
-
-extern void synchronize_irq(void);
+extern void synchronize_irq(unsigned int irq);
 
 #endif /* CONFIG_SMP */
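Editor's note: the new header folds hardirq accounting into the per-task preempt count: entering an interrupt adds IRQ_OFFSET, and in_interrupt() becomes a threshold test. A self-contained model of that arithmetic; preempt_count here is a plain variable and PREEMPT_ACTIVE a made-up value, purely to illustrate the accounting:

#include <assert.h>
#include <stdio.h>

#define IRQ_OFFSET	64
#define PREEMPT_ACTIVE	0x4000000	/* stand-in for the real bit */

static unsigned int preempt_count;	/* per-task in the real kernel */

#define irq_enter()	(preempt_count += IRQ_OFFSET)
#define irq_exit()	(preempt_count -= IRQ_OFFSET)
#define in_interrupt()	((preempt_count & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)

int main(void)
{
	assert(!in_interrupt());	/* process context */
	irq_enter();
	assert(in_interrupt());		/* hard irq context */
	irq_enter();			/* nested interrupt still counts */
	irq_exit();
	assert(in_interrupt());
	irq_exit();
	assert(!in_interrupt());	/* back to process context */
	printf("preempt_count accounting behaves as expected\n");
	return 0;
}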
include/asm-ppc/pgtable.h

@@ -246,6 +246,23 @@ extern unsigned long ioremap_bot, ioremap_base;
 #define _PAGE_KERNEL	_PAGE_BASE | _PAGE_WRENABLE | _PAGE_SHARED | _PAGE_HWEXEC
 #define _PAGE_IO	_PAGE_KERNEL | _PAGE_NO_CACHE | _PAGE_GUARDED
 #define _PAGE_RAM	_PAGE_KERNEL
+
+#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
+/* We want the debuggers to be able to set breakpoints anywhere, so
+ * don't write protect the kernel text */
+#define _PAGE_RAM_TEXT	_PAGE_RAM
+#else
+#ifdef CONFIG_PPC_STD_MMU
+/* On standard PPC MMU, no user access implies kernel read/write
+ * access, so to write-protect the kernel text we must turn on user
+ * access */
+#define _PAGE_RAM_TEXT	(_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER
+#else
+#define _PAGE_RAM_TEXT	(_PAGE_RAM & ~_PAGE_WRENABLE)
+#endif
+#endif
 
 #define PAGE_NONE	__pgprot(_PAGE_BASE)
 #define PAGE_READONLY	__pgprot(_PAGE_BASE | _PAGE_USER)
 #define PAGE_READONLY_X	__pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC)
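Editor's note: the quirk documented in the new comment is worth spelling out: on the classic hash MMU, dropping kernel write permission is only possible by also setting the user-access bit. A sketch of the flag composition with invented bit values (the real _PAGE_* values live elsewhere in this header and differ by MMU family):

#include <stdio.h>

/* Invented values for illustration; the real definitions are per-MMU. */
#define _PAGE_PRESENT	0x001
#define _PAGE_USER	0x002
#define _PAGE_HWWRITE	0x100
#define _PAGE_RW	0x400
#define _PAGE_WRENABLE	(_PAGE_RW | _PAGE_HWWRITE)
#define _PAGE_RAM	(_PAGE_PRESENT | _PAGE_WRENABLE)

/* Kernel text: drop write permission, add the user bit (hash-MMU quirk). */
#define _PAGE_RAM_TEXT	((_PAGE_RAM & ~_PAGE_WRENABLE) | _PAGE_USER)

int main(void)
{
	printf("_PAGE_RAM      = 0x%03x\n", _PAGE_RAM);	/* 0x501 */
	printf("_PAGE_RAM_TEXT = 0x%03x\n", _PAGE_RAM_TEXT);	/* 0x003 */
	return 0;
}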
include/asm-ppc/smplock.h

@@ -18,7 +18,7 @@ extern spinlock_t kernel_flag;
 #ifdef CONFIG_SMP
 #define kernel_locked()		spin_is_locked(&kernel_flag)
 #elif defined(CONFIG_PREEMPT)
-#define kernel_locked()		preempt_get_count()
+#define kernel_locked()		preempt_count()
 #endif
 
 /*
include/asm-ppc/softirq.h

@@ -8,31 +8,29 @@
 #include <asm/atomic.h>
 #include <asm/hardirq.h>
 
-#define local_bh_disable()			\
-do {						\
-	preempt_disable();			\
-	local_bh_count(smp_processor_id())++;	\
-	barrier();				\
+#define local_bh_disable()			\
+do {						\
+	preempt_count() += IRQ_OFFSET;		\
+	barrier();				\
 } while (0)
 
-#define __local_bh_enable()			\
-do {						\
-	barrier();				\
-	local_bh_count(smp_processor_id())--;	\
-	preempt_enable();			\
+#define __local_bh_enable()			\
+do {						\
+	barrier();				\
+	preempt_count() -= IRQ_OFFSET;		\
 } while (0)
 
-#define local_bh_enable()				\
-do {							\
-	barrier();					\
-	if (!--local_bh_count(smp_processor_id())	\
-	    && softirq_pending(smp_processor_id())) {	\
-		do_softirq();				\
-	}						\
-	preempt_enable();				\
+#define local_bh_enable()					\
+do {								\
+	barrier();						\
+	if ((preempt_count() -= IRQ_OFFSET) < IRQ_OFFSET	\
+	    && softirq_pending(smp_processor_id()))		\
+		do_softirq();					\
+	if (preempt_count() == 0)				\
+		preempt_check_resched();			\
 } while (0)
 
-#define in_softirq()	(local_bh_count(smp_processor_id()) != 0)
+#define in_softirq()	in_interrupt()
 
 #endif	/* __ASM_SOFTIRQ_H */
 #endif	/* __KERNEL__ */
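Editor's note: the new local_bh_enable() runs softirqs only when the subtraction leaves the count below IRQ_OFFSET, that is, when no hard irq or other bh section is still outstanding on this CPU. A standalone model of that check, again with preempt_count as a plain variable and do_softirq() reduced to a stub that just counts invocations:

#include <stdio.h>

#define IRQ_OFFSET 64

static unsigned int preempt_count;
static unsigned int softirqs_run;

static void do_softirq(void) { softirqs_run++; }	/* stub */

static void local_bh_disable(void) { preempt_count += IRQ_OFFSET; }

static void local_bh_enable(void)
{
	/* Run pending softirqs only if this was the outermost bh section
	 * and we are not inside a hard interrupt. */
	if ((preempt_count -= IRQ_OFFSET) < IRQ_OFFSET)
		do_softirq();	/* pretend something was pending */
}

int main(void)
{
	local_bh_disable();
	local_bh_disable();	/* nested: inner enable must not run softirqs */
	local_bh_enable();
	printf("after inner enable: %u softirq runs\n", softirqs_run); /* 0 */
	local_bh_enable();
	printf("after outer enable: %u softirq runs\n", softirqs_run); /* 1 */
	return 0;
}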
include/asm-ppc/system.h

@@ -98,23 +98,15 @@ extern void dump_regs(struct pt_regs *);
 #ifndef CONFIG_SMP
+/*
+ * Compatibility macros, to be removed in future...
+ */
 #define cli()			local_irq_disable()
 #define sti()			local_irq_enable()
 #define save_flags(flags)	local_save_flags(flags)
 #define restore_flags(flags)	local_irq_restore(flags)
 #define save_and_cli(flags)	local_irq_save(flags)
-#else /* CONFIG_SMP */
-
-extern void __global_cli(void);
-extern void __global_sti(void);
-extern unsigned long __global_save_flags(void);
-extern void __global_restore_flags(unsigned long);
-#define cli()			__global_cli()
-#define sti()			__global_sti()
-#define save_flags(x)		((x)=__global_save_flags())
-#define restore_flags(x)	__global_restore_flags(x)
-
 #endif /* !CONFIG_SMP */
 
 static __inline__ unsigned long