Commit be8a58ff (linux)
Authored 22 years ago by David Mosberger
Parent: 8beb1642

    ia64: Drop global irqlock support from hardirq.h. Move HP simulator config
    file to arch/ia64/hp/sim/ subdirectory.
Changes: 2 files, 72 additions(+), 68 deletions(-)

    arch/ia64/hp/sim/Config.in     +0  -0
    include/asm-ia64/hardirq.h    +72 -68
arch/ia64/hp/Config.in → arch/ia64/hp/sim/Config.in  (file moved, contents unchanged)
include/asm-ia64/hardirq.h

@@ -18,88 +18,92 @@
  */
 #define softirq_pending(cpu)		(cpu_data(cpu)->softirq_pending)
 #define ksoftirqd_task(cpu)		(cpu_data(cpu)->ksoftirqd)
-#define irq_count(cpu)			(cpu_data(cpu)->irq_stat.f.irq_count)
-#define bh_count(cpu)			(cpu_data(cpu)->irq_stat.f.bh_count)
 #define syscall_count(cpu)		/* unused on IA-64 */
 #define nmi_count(cpu)			0
 
 #define local_softirq_pending()		(local_cpu_data->softirq_pending)
 #define local_ksoftirqd_task()		(local_cpu_data->ksoftirqd)
-#define really_local_irq_count()	(local_cpu_data->irq_stat.f.irq_count)	/* XXX fix me */
-#define really_local_bh_count()		(local_cpu_data->irq_stat.f.bh_count)	/* XXX fix me */
 #define local_syscall_count()		/* unused on IA-64 */
 #define local_nmi_count()		0
 
 /*
- * Are we in an interrupt context? Either doing bottom half or hardware interrupt
- * processing?
+ * We put the hardirq and softirq counter into the preemption counter. The bitmask has the
+ * following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ * - bits 16-31 are the hardirq count (max # of hardirqs: 65536)
+ *
+ * - (bit 63 is the PREEMPT_ACTIVE flag---not currently implemented.)
+ *
+ * PREEMPT_MASK: 0x000000ff
+ * SOFTIRQ_MASK: 0x0000ff00
+ * HARDIRQ_MASK: 0xffff0000
  */
-#define in_interrupt()		(local_cpu_data->irq_stat.irq_and_bh_counts != 0)
-#define in_irq()		(local_cpu_data->irq_stat.f.irq_count != 0)
-
-#ifndef CONFIG_SMP
-# define local_hardirq_trylock()	(really_local_irq_count() == 0)
-# define local_hardirq_endlock()	do { } while (0)
-# define local_irq_enter(irq)		(really_local_irq_count()++)
-# define local_irq_exit(irq)		(really_local_irq_count()--)
-# define synchronize_irq()		barrier()
-#else
-
-#include <asm/atomic.h>
-#include <asm/smp.h>
-
-extern unsigned int global_irq_holder;
-extern volatile unsigned long global_irq_lock;
-
-static inline int
-irqs_running (void)
-{
-	int i;
-
-	for (i = 0; i < NR_CPUS; i++)
-		if (irq_count(i))
-			return 1;
-	return 0;
-}
-
-static inline void
-release_irqlock (int cpu)
-{
-	/* if we didn't own the irq lock, just ignore.. */
-	if (global_irq_holder == cpu) {
-		global_irq_holder = NO_PROC_ID;
-		smp_mb__before_clear_bit();	/* need barrier before releasing lock... */
-		clear_bit(0, &global_irq_lock);
-	}
-}
-
-static inline void
-local_irq_enter (int irq)
-{
-	really_local_irq_count()++;
-	while (test_bit(0, &global_irq_lock)) {
-		/* nothing */;
-	}
-}
-
-static inline void
-local_irq_exit (int irq)
-{
-	really_local_irq_count()--;
-}
-
-static inline int
-local_hardirq_trylock (void)
-{
-	return !really_local_irq_count() && !test_bit(0, &global_irq_lock);
-}
-
-#define local_hardirq_endlock()	do { } while (0)
-
-extern void synchronize_irq (void);
+
+#define PREEMPT_BITS	8
+#define SOFTIRQ_BITS	8
+#define HARDIRQ_BITS	16
+
+#define PREEMPT_SHIFT	0
+#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+
+#define __MASK(x)	((1UL << (x))-1)
+
+#define PREEMPT_MASK	(__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define HARDIRQ_MASK	(__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define SOFTIRQ_MASK	(__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+
+#define hardirq_count()	(preempt_count() & HARDIRQ_MASK)
+#define softirq_count()	(preempt_count() & SOFTIRQ_MASK)
+#define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
+
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
+
+/*
+ * The hardirq mask has to be large enough to have space for potentially all IRQ sources
+ * in the system nesting on a single CPU:
+ */
+#if (1 << HARDIRQ_BITS) < NR_IRQS
+# error HARDIRQ_BITS is too low!
+#endif
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context?
+ * Interrupt context?
+ */
+#define in_irq()		(hardirq_count())
+#define in_softirq()		(softirq_count())
+#define in_interrupt()		(irq_count())
+
+#define hardirq_trylock()	(!in_interrupt())
+#define hardirq_endlock()	do { } while (0)
+
+#define irq_enter()		(preempt_count() += HARDIRQ_OFFSET)
+
+#if CONFIG_PREEMPT
+# error CONFIG_PREEMT currently not supported.
+# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
+#else
+# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
+#endif
+
+#define irq_exit()							\
+do {									\
+	preempt_count() -= IRQ_EXIT_OFFSET;				\
+	if (!in_interrupt() && softirq_pending(smp_processor_id()))	\
+		do_softirq();						\
+	preempt_enable_no_resched();					\
+} while (0)
+
+#ifdef CONFIG_SMP
+  extern void synchronize_irq (unsigned int irq);
+#else
+# define synchronize_irq(irq)	barrier()
 
 #endif /* CONFIG_SMP */
 
 #endif /* _ASM_IA64_HARDIRQ_H */
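The new scheme folds all interrupt state into a single per-task counter, so nesting and context checks become plain integer arithmetic. Below is a minimal user-space sketch of that arithmetic (illustration only, not part of the commit: preempt_count is modeled here as an ordinary variable, and the assert/printf scaffolding is ours; the mask and shift values are taken verbatim from the header above).

/* Illustration only -- a user-space model of the counter layout in the
 * new hardirq.h: one word holds the preempt count (bits 0-7), the softirq
 * count (bits 8-15) and the hardirq count (bits 16-31). */
#include <assert.h>
#include <stdio.h>

/* These mirror the kernel header's definitions. */
#define PREEMPT_BITS	8
#define SOFTIRQ_BITS	8
#define HARDIRQ_BITS	16

#define PREEMPT_SHIFT	0
#define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)

#define MASK(x)		((1UL << (x)) - 1)
#define SOFTIRQ_MASK	(MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define HARDIRQ_MASK	(MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)

#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)

static unsigned long preempt_count;	/* stand-in for the per-task counter */

int main(void)
{
	/* The masks match the comment block in the header. */
	assert(SOFTIRQ_MASK == 0x0000ff00UL);
	assert(HARDIRQ_MASK == 0xffff0000UL);

	/* irq_enter(): each nested hard interrupt adds HARDIRQ_OFFSET. */
	preempt_count += HARDIRQ_OFFSET;
	preempt_count += HARDIRQ_OFFSET;	/* a second, nested IRQ */
	printf("hardirq depth  = %lu\n",
	       (preempt_count & HARDIRQ_MASK) >> HARDIRQ_SHIFT);	/* 2 */

	/* in_interrupt() is one AND on the local counter. */
	printf("in_interrupt() = %d\n",
	       (preempt_count & (HARDIRQ_MASK | SOFTIRQ_MASK)) != 0);	/* 1 */

	/* irq_exit() in the non-preemptible config: subtract the offset. */
	preempt_count -= HARDIRQ_OFFSET;
	preempt_count -= HARDIRQ_OFFSET;
	printf("in_interrupt() = %d\n",
	       (preempt_count & (HARDIRQ_MASK | SOFTIRQ_MASK)) != 0);	/* 0 */
	return 0;
}

Compared with the deleted global_irq_lock code, entering and leaving an interrupt is now one add and one subtract on a local counter, and in_interrupt() no longer requires per-CPU bookkeeping such as the removed irqs_running() scan over NR_CPUS.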