Commit be8a58ff authored by David Mosberger's avatar David Mosberger

ia64: Drop global irqlock support from hardirq.h. Move HP simulator config

	file to arch/ia64/hp/sim/ subdirectory.
parent 8beb1642
......@@ -18,88 +18,92 @@
*/
/*
 * Per-CPU interrupt-state accessors.
 * NOTE(review): this region interleaves two generations of the API from
 * the commit diff -- the cpu_data(cpu)-based *_count(cpu) accessors and
 * the local_* variants for the current CPU.  The "XXX fix me" entries
 * flag accessors slated for replacement by the preempt_count()-based
 * scheme defined further below in this header.
 */
#define softirq_pending(cpu) (cpu_data(cpu)->softirq_pending)
#define ksoftirqd_task(cpu) (cpu_data(cpu)->ksoftirqd)
#define irq_count(cpu) (cpu_data(cpu)->irq_stat.f.irq_count)
#define bh_count(cpu) (cpu_data(cpu)->irq_stat.f.bh_count)
/* syscall_count/nmi_count expand to nothing / 0: IA-64 does not track these. */
#define syscall_count(cpu) /* unused on IA-64 */
#define nmi_count(cpu) 0
#define local_softirq_pending() (local_cpu_data->softirq_pending)
#define local_ksoftirqd_task() (local_cpu_data->ksoftirqd)
#define really_local_irq_count() (local_cpu_data->irq_stat.f.irq_count) /* XXX fix me */
#define really_local_bh_count() (local_cpu_data->irq_stat.f.bh_count) /* XXX fix me */
#define local_syscall_count() /* unused on IA-64 */
#define local_nmi_count() 0
/*
* Are we in an interrupt context? Either doing bottom half or hardware interrupt
* processing?
* We put the hardirq and softirq counter into the preemption counter. The bitmask has the
* following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-31 are the hardirq count (max # of hardirqs: 65536)
*
* - (bit 63 is the PREEMPT_ACTIVE flag---not currently implemented.)
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0xffff0000
*/
#define in_interrupt() (local_cpu_data->irq_stat.irq_and_bh_counts != 0)
#define in_irq() (local_cpu_data->irq_stat.f.irq_count != 0)
#ifndef CONFIG_SMP
# define local_hardirq_trylock() (really_local_irq_count() == 0)
# define local_hardirq_endlock() do { } while (0)
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 16
# define local_irq_enter(irq) (really_local_irq_count()++)
# define local_irq_exit(irq) (really_local_irq_count()--)
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
# define synchronize_irq() barrier()
#else
#define __MASK(x) ((1UL << (x))-1)
#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
/*
* The hardirq mask has to be large enough to have space for potentially all IRQ sources
* in the system nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
#include <asm/atomic.h>
#include <asm/smp.h>
extern unsigned int global_irq_holder;
extern volatile unsigned long global_irq_lock;
/*
 * Return 1 if any CPU in the system is currently executing in
 * hard-interrupt context (its per-CPU irq_count is non-zero),
 * 0 otherwise.
 * NOTE(review): scans all NR_CPUS slots unconditionally -- presumably
 * irq_count() reads 0 for CPUs that are not present; confirm against
 * cpu_data() initialization.
 */
static inline int
irqs_running (void)
{
int i;
for (i = 0; i < NR_CPUS; i++)
if (irq_count(i))
return 1;
return 0;
}
/*
 * Release the global irq lock if, and only if, @cpu is the current
 * holder.  Clears the holder first, then issues a memory barrier
 * before clearing bit 0 of global_irq_lock so other CPUs observe the
 * holder reset before they can acquire the lock.  Calling this on a
 * CPU that does not hold the lock is a harmless no-op.
 */
static inline void
release_irqlock (int cpu)
{
/* if we didn't own the irq lock, just ignore.. */
if (global_irq_holder == cpu) {
global_irq_holder = NO_PROC_ID;
smp_mb__before_clear_bit(); /* need barrier before releasing lock... */
clear_bit(0,&global_irq_lock);
}
}
/*
 * Mark entry into hard-interrupt context on the current CPU: bump the
 * per-CPU hardirq count, then busy-wait (spin) until the global irq
 * lock (bit 0 of global_irq_lock) is free.  The @irq argument is
 * unused here.
 * NOTE(review): the count is incremented BEFORE spinning, so other
 * CPUs see this CPU as "in irq" while it waits for the lock.
 */
static inline void
local_irq_enter (int irq)
{
really_local_irq_count()++;

while (test_bit(0,&global_irq_lock)) {
/* nothing */;
}
}
/*
 * Mark exit from hard-interrupt context on the current CPU by
 * decrementing the per-CPU hardirq count.  Must pair with a prior
 * local_irq_enter().  The @irq argument is unused.
 */
static inline void
local_irq_exit (int irq)
{
really_local_irq_count()--;
}
/*
 * Non-blocking check that it is safe to take the hardirq path on the
 * current CPU: returns non-zero only when this CPU is not already in
 * hard-interrupt context AND no CPU holds the global irq lock.
 * Does not acquire anything itself, despite the "trylock" name.
 */
static inline int
local_hardirq_trylock (void)
{
return !really_local_irq_count() && !test_bit(0,&global_irq_lock);
}
#define local_hardirq_endlock() do { } while (0)
extern void synchronize_irq (void);
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context?
* Interrupt context?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
/*
 * Kernel preemption is not yet supported by this header: fail the build
 * early if it is enabled.  With preemption, irq_exit() must keep one
 * reference in the preemption counter (HARDIRQ_OFFSET-1) so the final
 * preempt_enable_no_resched() balances; without preemption the full
 * HARDIRQ_OFFSET is dropped.
 * (Fixed typo in the #error message: CONFIG_PREEMT -> CONFIG_PREEMPT.)
 */
#if CONFIG_PREEMPT
# error CONFIG_PREEMPT currently not supported.
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
/*
 * Leave hard-interrupt context: drop IRQ_EXIT_OFFSET from the
 * preemption counter, run pending softirqs if we are no longer in any
 * interrupt context, then re-enable preemption without triggering a
 * reschedule.
 */
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
#ifdef CONFIG_SMP
extern void synchronize_irq (unsigned int irq);
#else
# define synchronize_irq(irq) barrier()
#endif /* CONFIG_SMP */
#endif /* _ASM_IA64_HARDIRQ_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment