Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Support
Keyboard shortcuts
?
Submit feedback
Contribute to GitLab
Sign in / Register
Toggle navigation
L
linux
Project overview
Project overview
Details
Activity
Releases
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Issues
0
Issues
0
List
Boards
Labels
Milestones
Merge Requests
0
Merge Requests
0
Analytics
Analytics
Repository
Value Stream
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Create a new issue
Commits
Issue Boards
Open sidebar
Kirill Smelkov
linux
Commits
53a15ebd
Commit
53a15ebd
authored
Jul 24, 2002
by
David S. Miller
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
SPARC64: Merge up with latest x86 IRQ changes from Ingo.
parent
db3f7025
Changes
4
Show whitespace changes
Inline
Side-by-side
Showing
4 changed files
with
89 additions
and
29 deletions
+89
-29
include/asm-sparc64/hardirq.h
include/asm-sparc64/hardirq.h
+68
-8
include/asm-sparc64/softirq.h
include/asm-sparc64/softirq.h
+9
-13
include/asm-sparc64/system.h
include/asm-sparc64/system.h
+9
-8
include/asm-sparc64/thread_info.h
include/asm-sparc64/thread_info.h
+3
-0
No files found.
include/asm-sparc64/hardirq.h
View file @
53a15ebd
...
@@ -24,23 +24,83 @@ typedef struct {
...
@@ -24,23 +24,83 @@ typedef struct {
#include <linux/irq_cpustat.h>
/* Standard mappings for irq_cpustat_t above */
#include <linux/irq_cpustat.h>
/* Standard mappings for irq_cpustat_t above */
#define IRQ_OFFSET 64
/*
* We put the hardirq and softirq counter into the preemption
* counter. The bitmask has the following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-23 are the hardirq count (max # of hardirqs: 256)
*
* - ( bit 26 is the PREEMPT_ACTIVE flag. )
*
 * PREEMPT_MASK: 0x000000ff
 * SOFTIRQ_MASK: 0x0000ff00
 * HARDIRQ_MASK: 0x00ff0000
*/
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 8
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define __MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
-#define in_interrupt() \
-	((preempt_count() & ~PREEMPT_ACTIVE) >= IRQ_OFFSET)
-#define in_irq in_interrupt
+#define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
/*
* The hardirq mask has to be large enough to have
* space for potentially all IRQ sources in the system
* nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context? Interrupt context?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define hardirq_trylock() (!in_interrupt())
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
#define hardirq_endlock() do { } while (0)
-#define irq_enter()	(preempt_count() += IRQ_OFFSET)
-#define irq_exit()	(preempt_count() -= IRQ_OFFSET)
+#define irq_enter()	(preempt_count() += HARDIRQ_OFFSET)
#if CONFIG_PREEMPT
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
#ifndef CONFIG_SMP
#ifndef CONFIG_SMP
# define synchronize_irq(irq) barrier()
# define synchronize_irq(irq) barrier()
#else
#else
extern void synchronize_irq(unsigned int irq);
#endif
#endif
/* CONFIG_SMP */
#endif
/* !(__SPARC64_HARDIRQ_H) */
#endif
/* !(__SPARC64_HARDIRQ_H) */
include/asm-sparc64/softirq.h
View file @
53a15ebd
...
@@ -10,20 +10,16 @@
...
@@ -10,20 +10,16 @@
#include <asm/hardirq.h>
#include <asm/hardirq.h>
#include <asm/system.h>
/* for membar() */
#include <asm/system.h>
/* for membar() */
-#define local_bh_disable()	do { preempt_count() += IRQ_OFFSET; barrier(); } while (0)
-#define __local_bh_enable()	do { barrier(); preempt_count() -= IRQ_OFFSET; } while (0)
+#define local_bh_disable() \
+		do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
+#define __local_bh_enable() \
+		do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
 #define local_bh_enable() \
 do { \
-	if (unlikely((preempt_count() == IRQ_OFFSET) && \
-		     softirq_pending(smp_processor_id()))) { \
-		__local_bh_enable(); \
-		do_softirq(); \
-		preempt_check_resched(); \
-	} else { \
-		__local_bh_enable(); \
-		preempt_check_resched(); \
-	} \
+	__local_bh_enable(); \
+	if (unlikely(!in_interrupt() && \
+		     softirq_pending(smp_processor_id()))) \
+		do_softirq(); \
+	preempt_check_resched(); \
 } while (0)
#define in_softirq() in_interrupt()
#endif
/* !(__SPARC64_SOFTIRQ_H) */
#endif
/* !(__SPARC64_SOFTIRQ_H) */
include/asm-sparc64/system.h
View file @
53a15ebd
...
@@ -66,15 +66,16 @@ enum sparc_cpu {
...
@@ -66,15 +66,16 @@ enum sparc_cpu {
#define local_irq_save(flags) ((flags) = read_pil_and_cli())
#define local_irq_save(flags) ((flags) = read_pil_and_cli())
#define local_irq_restore(flags) setipl((flags))
#define local_irq_restore(flags) setipl((flags))
-/*
- * Compatibility macros - they will be removed after some time.
- */
-#ifndef CONFIG_SMP
-#define cli() local_irq_disable()
-#define sti() local_irq_enable()
-#define save_flags(x) local_save_flags(x)
-#define restore_flags(x) local_irq_restore(x)
-#endif
+/* On sparc64 IRQ flags are the PIL register.  A value of zero
+ * means all interrupt levels are enabled, any other value means
+ * only IRQ levels greater than that value will be received.
+ * Consequently this means that the lowest IRQ level is one.
+ */
+#define irqs_disabled()		\
+({	unsigned long flags;	\
+	local_save_flags(flags);\
+	(flags > 0);		\
+})
#define nop() __asm__ __volatile__ ("nop")
#define nop() __asm__ __volatile__ ("nop")
...
...
include/asm-sparc64/thread_info.h
View file @
53a15ebd
...
@@ -114,6 +114,8 @@ struct thread_info {
...
@@ -114,6 +114,8 @@ struct thread_info {
/*
/*
* macros/functions for gaining access to the thread information structure
* macros/functions for gaining access to the thread information structure
*
* preempt_count needs to be 1 initially, until the scheduler is functional.
*/
*/
#ifndef __ASSEMBLY__
#ifndef __ASSEMBLY__
...
@@ -122,6 +124,7 @@ struct thread_info {
...
@@ -122,6 +124,7 @@ struct thread_info {
task: &tsk, \
task: &tsk, \
flags: ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \
flags: ((unsigned long)ASI_P) << TI_FLAG_CURRENT_DS_SHIFT, \
exec_domain: &default_exec_domain, \
exec_domain: &default_exec_domain, \
preempt_count: 1, \
}
}
#define init_thread_info (init_thread_union.thread_info)
#define init_thread_info (init_thread_union.thread_info)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment