Commit 1e0b058c authored by Matthew Wilcox, committed by Linus Torvalds

[PATCH] include/asm-parisc

Update include/asm-parisc
parent db299c0d
@@ -27,7 +27,7 @@
.level 2.0w
#endif
#include <asm/offset.h>
#include <asm/offsets.h>
#include <asm/page.h>
#include <asm/asmregs.h>
@@ -36,17 +36,34 @@
gp = 27
ipsw = 22
#if __PAGE_OFFSET == 0xc0000000
.macro tophys gr
zdep \gr, 31, 30, \gr
/*
* We provide two versions of each macro to convert from physical
* to virtual and vice versa. The "_r1" versions take one argument
* register, but trash r1 to do the conversion. The other
* version takes two arguments: a source and a destination register.
* However, the source and destination registers cannot be
* the same register.
*/
.macro tophys grvirt, grphys
ldil L%(__PAGE_OFFSET), \grphys
sub \grvirt, \grphys, \grphys
.endm
.macro tovirt gr
depi 3,1,2,\gr
.macro tovirt grphys, grvirt
ldil L%(__PAGE_OFFSET), \grvirt
add \grphys, \grvirt, \grvirt
.endm
.macro tophys_r1 gr
ldil L%(__PAGE_OFFSET), %r1
sub \gr, %r1, \gr
.endm
.macro tovirt_r1 gr
ldil L%(__PAGE_OFFSET), %r1
add \gr, %r1, \gr
.endm
#else
#error unknown __PAGE_OFFSET
#endif
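For reference, the same arithmetic in C terms: with __PAGE_OFFSET at 0xc0000000, converting kernel-virtual to physical is a subtraction and the reverse an addition. A minimal user-space sketch of that mapping (the macro names mirror the assembler macros above and are illustrative only, not kernel symbols):

#include <stdio.h>

#define __PAGE_OFFSET 0xc0000000UL

#define tophys(v) ((unsigned long)(v) - __PAGE_OFFSET)	/* virt -> phys */
#define tovirt(p) ((unsigned long)(p) + __PAGE_OFFSET)	/* phys -> virt */

int main(void)
{
	unsigned long virt = 0xc0102000UL;

	printf("virt %#lx -> phys %#lx -> virt %#lx\n",
	       virt, tophys(virt), tovirt(tophys(virt)));
	return 0;
}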
.macro delay value
ldil L%\value, 1
@@ -59,11 +76,21 @@
.macro debug value
.endm
#ifdef __LP64__
# define LDIL_FIXUP(reg) depdi 0,31,32,reg
#else
# define LDIL_FIXUP(reg)
#endif
/* Shift Left - note the r and t can NOT be the same! */
.macro shl r, sa, t
dep,z \r, 31-\sa, 32-\sa, \t
.endm
/* The PA 2.0 shift left */
.macro shlw r, sa, t
depw,z \r, 31-\sa, 32-\sa, \t
.endm
/* And the PA 2.0W shift left */
.macro shld r, sa, t
depd,z \r, 63-\sa, 64-\sa, \t
.endm
/* load 32-bit 'value' into 'reg' compensating for the ldil
* sign-extension when running in wide mode.
@@ -72,7 +99,6 @@
.macro load32 value, reg
ldil L%\value, \reg
ldo R%\value(\reg), \reg
LDIL_FIXUP(\reg)
.endm
#ifdef __LP64__
@@ -89,7 +115,6 @@
#ifdef __LP64__
ldil L%__gp, %r27
ldo R%__gp(%r27), %r27
LDIL_FIXUP(%r27)
#else
ldil L%$global$, %r27
ldo R%$global$(%r27), %r27
@@ -102,6 +127,7 @@
#define REST_CR(r, where) LDREG where, %r1 ! mtctl %r1, r
.macro save_general regs
STREG %r1, PT_GR1 (\regs)
STREG %r2, PT_GR2 (\regs)
STREG %r3, PT_GR3 (\regs)
STREG %r4, PT_GR4 (\regs)
@@ -126,15 +152,16 @@
STREG %r23, PT_GR23(\regs)
STREG %r24, PT_GR24(\regs)
STREG %r25, PT_GR25(\regs)
/* r26 is clobbered by cr19 and assumed to be saved beforehand */
/* r26 is saved in get_stack and used to preserve a value across virt_map */
STREG %r27, PT_GR27(\regs)
STREG %r28, PT_GR28(\regs)
/* r29 is already saved and points to PT_xxx struct */
/* r29 is saved in get_stack and used to point to saved registers */
/* r30 stack pointer saved in get_stack */
STREG %r31, PT_GR31(\regs)
.endm
.macro rest_general regs
/* r1 used as a temp in rest_stack and is restored there */
LDREG PT_GR2 (\regs), %r2
LDREG PT_GR3 (\regs), %r3
LDREG PT_GR4 (\regs), %r4
@@ -162,6 +189,7 @@
LDREG PT_GR26(\regs), %r26
LDREG PT_GR27(\regs), %r27
LDREG PT_GR28(\regs), %r28
/* r29 points to register save area, and is restored in rest_stack */
/* r30 stack pointer restored in rest_stack */
LDREG PT_GR31(\regs), %r31
.endm
@@ -238,8 +266,8 @@
#ifdef __LP64__
.macro callee_save
ldo 144(%r30), %r30
std %r3, -144(%r30)
std,ma %r3, 144(%r30)
mfctl %cr27, %r3
std %r4, -136(%r30)
std %r5, -128(%r30)
std %r6, -120(%r30)
@@ -255,9 +283,11 @@
std %r16, -40(%r30)
std %r17, -32(%r30)
std %r18, -24(%r30)
std %r3, -16(%r30)
.endm
.macro callee_rest
ldd -16(%r30), %r3
ldd -24(%r30), %r18
ldd -32(%r30), %r17
ldd -40(%r30), %r16
@@ -273,52 +303,54 @@
ldd -120(%r30), %r6
ldd -128(%r30), %r5
ldd -136(%r30), %r4
ldd -144(%r30), %r3
ldo -144(%r30), %r30
mtctl %r3, %cr27
ldd,mb -144(%r30), %r3
.endm
#else /* __LP64__ */
#else /* ! __LP64__ */
.macro callee_save
ldo 128(30), 30
stw 3, -128(30)
stw 4, -124(30)
stw 5, -120(30)
stw 6, -116(30)
stw 7, -112(30)
stw 8, -108(30)
stw 9, -104(30)
stw 10, -100(30)
stw 11, -96(30)
stw 12, -92(30)
stw 13, -88(30)
stw 14, -84(30)
stw 15, -80(30)
stw 16, -76(30)
stw 17, -72(30)
stw 18, -68(30)
stw,ma %r3, 128(%r30)
mfctl %cr27, %r3
stw %r4, -124(%r30)
stw %r5, -120(%r30)
stw %r6, -116(%r30)
stw %r7, -112(%r30)
stw %r8, -108(%r30)
stw %r9, -104(%r30)
stw %r10, -100(%r30)
stw %r11, -96(%r30)
stw %r12, -92(%r30)
stw %r13, -88(%r30)
stw %r14, -84(%r30)
stw %r15, -80(%r30)
stw %r16, -76(%r30)
stw %r17, -72(%r30)
stw %r18, -68(%r30)
stw %r3, -64(%r30)
.endm
.macro callee_rest
ldw -68(30), 18
ldw -72(30), 17
ldw -76(30), 16
ldw -80(30), 15
ldw -84(30), 14
ldw -88(30), 13
ldw -92(30), 12
ldw -96(30), 11
ldw -100(30), 10
ldw -104(30), 9
ldw -108(30), 8
ldw -112(30), 7
ldw -116(30), 6
ldw -120(30), 5
ldw -124(30), 4
ldw -128(30), 3
ldo -128(30), 30
ldw -64(%r30), %r3
ldw -68(%r30), %r18
ldw -72(%r30), %r17
ldw -76(%r30), %r16
ldw -80(%r30), %r15
ldw -84(%r30), %r14
ldw -88(%r30), %r13
ldw -92(%r30), %r12
ldw -96(%r30), %r11
ldw -100(%r30), %r10
ldw -104(%r30), %r9
ldw -108(%r30), %r8
ldw -112(%r30), %r7
ldw -116(%r30), %r6
ldw -120(%r30), %r5
ldw -124(%r30), %r4
mtctl %r3, %cr27
ldw,mb -128(%r30), %r3
.endm
#endif /* __LP64__ */
#endif /* ! __LP64__ */
.macro save_specials regs
@@ -339,14 +371,25 @@
mtctl %r0, %cr18
SAVE_CR (%cr18, PT_IAOQ1(\regs))
#ifdef __LP64__
/* cr11 (sar) is a funny one. 5 bits on PA1.1 and 6 bits on PA2.0.
* For PA2.0 mtsar or mtctl always writes 6 bits, but mfctl only
* reads 5 bits. Use mfctl,w to read all six bits. Otherwise
* we lose the 6th bit on a save/restore over interrupt.
*/
mfctl,w %cr11, %r1
STREG %r1, PT_SAR (\regs)
#else
SAVE_CR (%cr11, PT_SAR (\regs))
SAVE_CR (%cr22, PT_PSW (\regs))
#endif
SAVE_CR (%cr19, PT_IIR (\regs))
SAVE_CR (%cr28, PT_GR1 (\regs))
SAVE_CR (%cr31, PT_GR29 (\regs))
STREG %r26, PT_GR26 (\regs)
mfctl %cr29, %r26
/*
* Code immediately following this macro (in intr_save) relies
* on r8 containing ipsw.
*/
mfctl %cr22, %r8
STREG %r8, PT_PSW(\regs)
.endm
.macro rest_specials regs
@@ -15,10 +15,12 @@
*/
#ifdef CONFIG_SMP
/* we have an array of spinlocks for our atomic_ts, and a hash function
* to get the right index */
# define ATOMIC_HASH_SIZE 1
# define ATOMIC_HASH(a) (&__atomic_hash[0])
/* Use an array of spinlocks for our atomic_ts.
** Hash function to index into a different SPINLOCK.
** Since "a" is usually an address, ">>8" makes one spinlock per 64-bytes.
*/
# define ATOMIC_HASH_SIZE 4
# define ATOMIC_HASH(a) (&__atomic_hash[(((unsigned long) a)>>8)&(ATOMIC_HASH_SIZE-1)])
extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
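To illustrate the hash: shifting the address right by 8 and masking with ATOMIC_HASH_SIZE-1 means all atomic_ts within the same 256-byte block share one lock, with blocks striped across the four locks. A stand-alone sketch of just the index computation (user-space; ATOMIC_HASH_IDX is an illustrative name, not the kernel macro):

#include <stdio.h>

#define ATOMIC_HASH_SIZE 4
#define ATOMIC_HASH_IDX(a) ((((unsigned long)(a)) >> 8) & (ATOMIC_HASH_SIZE - 1))

int main(void)
{
	printf("%lu\n", ATOMIC_HASH_IDX(0x1000));	/* 0 */
	printf("%lu\n", ATOMIC_HASH_IDX(0x10ff));	/* 0: same 256-byte block */
	printf("%lu\n", ATOMIC_HASH_IDX(0x1100));	/* 1: next block */
	printf("%lu\n", ATOMIC_HASH_IDX(0x1400));	/* 0: wraps modulo 4 */
	return 0;
}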
/* copied from <asm/spinlock.h> and modified */
@@ -44,12 +46,101 @@ extern spinlock_t __atomic_hash[ATOMIC_HASH_SIZE];
/* Note that we need not lock read accesses - aligned word writes/reads
* are atomic, so a reader never sees inconsistent values.
*
* Cache-line alignment would conflict with, for example, linux/module.h */
* Cache-line alignment would conflict with, for example, linux/module.h
*/
typedef struct {
volatile int counter;
} atomic_t;
/*
** xchg/cmpxchg moved from asm/system.h - ggg
*/
#if 1
/* This should get optimized out since it's never called.
** Or get a link error if xchg is used "wrong".
*/
extern void __xchg_called_with_bad_pointer(void);
#else
static inline void __xchg_called_with_bad_pointer(void)
{
extern void panic(const char * fmt, ...);
panic("xchg called with bad pointer");
}
#endif
/* __xchg32/64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __xchg8(char, char *);
extern unsigned long __xchg32(int, int *);
#ifdef __LP64__
extern unsigned long __xchg64(unsigned long, unsigned long *);
#endif
/* optimizer better get rid of switch since size is a constant */
static __inline__ unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
int size)
{
switch(size) {
#ifdef __LP64__
case 8: return __xchg64(x,(unsigned long *) ptr);
#endif
case 4: return __xchg32((int) x, (int *) ptr);
case 1: return __xchg8((char) x, (char *) ptr);
}
__xchg_called_with_bad_pointer();
return x;
}
/*
** REVISIT - Abandoned use of LDCW in xchg() for now:
** o need to test sizeof(*ptr) to avoid clearing adjacent bytes
** o and while we are at it, could __LP64__ code use LDCD too?
**
** if (__builtin_constant_p(x) && (x == NULL))
** if (((unsigned long)p & 0xf) == 0)
** return __ldcw(p);
*/
#define xchg(ptr,x) \
((__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr))))
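Typical use of xchg() is an atomic read-and-replace, for example claiming a pending-work flag so that exactly one CPU processes it (a hypothetical caller, shown for illustration; 'pending' is not a real kernel symbol):

static int pending;		/* shared flag, assumed set elsewhere */

static void drain_pending(void)
{
	/* atomically fetch the old value and store 0; sizeof(int) == 4,
	 * so this expands to __xchg32() via the switch above */
	if (xchg(&pending, 0)) {
		/* ... only the CPU that saw the non-zero value gets here ... */
	}
}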
#define __HAVE_ARCH_CMPXCHG 1
/* bug catcher for when unsupported size is used - won't link */
extern void __cmpxchg_called_with_bad_pointer(void);
/* __cmpxchg_u32/u64 defined in arch/parisc/lib/bitops.c */
extern unsigned long __cmpxchg_u32(volatile unsigned int *m, unsigned int old, unsigned int new_);
extern unsigned long __cmpxchg_u64(volatile unsigned long *ptr, unsigned long old, unsigned long new_);
/* don't worry...optimizer will get rid of most of this */
static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
{
switch(size) {
#ifdef __LP64__
case 8: return __cmpxchg_u64((unsigned long *)ptr, old, new_);
#endif
case 4: return __cmpxchg_u32((unsigned int *)ptr, (unsigned int) old, (unsigned int) new_);
}
__cmpxchg_called_with_bad_pointer();
return old;
}
#define cmpxchg(ptr,o,n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
(__typeof__(*(ptr))) __cmpxchg((ptr), (unsigned long)_o_, \
(unsigned long)_n_, sizeof(*(ptr))); \
})
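cmpxchg() is normally wrapped in a retry loop: read the value, compute the update, and retry if another CPU raced in between. A sketch of a lock-free add built on the macro above (illustrative, not part of this header):

static __inline__ unsigned int
atomic_add_via_cmpxchg(volatile unsigned int *p, unsigned int delta)
{
	unsigned int old, new_;

	do {
		old = *p;
		new_ = old + delta;
		/* retry if *p changed between the read and the cmpxchg */
	} while (cmpxchg(p, old, new_) != old);

	return new_;
}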
/* It's possible to reduce all atomic operations to
* __atomic_add_return, __atomic_set and __atomic_read (the latter
* is there only for consistency). */
@@ -75,7 +166,7 @@ static __inline__ void __atomic_set(atomic_t *v, int i)
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(v), flags);
}
static __inline__ int __atomic_read(atomic_t *v)
{
return v->counter;
@@ -100,4 +191,9 @@ static __inline__ int __atomic_read(atomic_t *v)
#define ATOMIC_INIT(i) { (i) }
#define smp_mb__before_atomic_dec() smp_mb()
#define smp_mb__after_atomic_dec() smp_mb()
#define smp_mb__before_atomic_inc() smp_mb()
#define smp_mb__after_atomic_inc() smp_mb()
#endif
#ifndef _PARISC_BITOPS_H
#define _PARISC_BITOPS_H
#include <linux/spinlock.h>
#include <linux/compiler.h>
#include <asm/system.h>
#include <asm/byteorder.h>
#include <asm/atomic.h>
/*
* HP-PARISC specific bit operations
* for a detailed description of the functions please refer
* to include/asm-i386/bitops.h or kerneldoc
*/
#ifdef __LP64__
# define SHIFT_PER_LONG 6
#ifndef BITS_PER_LONG
@@ -20,6 +26,79 @@
#define CHOP_SHIFTCOUNT(x) ((x) & (BITS_PER_LONG - 1))
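CHOP_SHIFTCOUNT and SHIFT_PER_LONG together split a bit number into a word index and a bit-within-word offset. A hypothetical helper showing the arithmetic on a 64-bit kernel (SHIFT_PER_LONG == 6):

/* illustration only: where does bit 'nr' land? */
static __inline__ void locate_bit(int nr, int *word, int *bit)
{
	*word = nr >> SHIFT_PER_LONG;	/* which unsigned long */
	*bit = CHOP_SHIFTCOUNT(nr);	/* which bit within it */
}
/* e.g. nr == 70: word 1, bit 6, so set_bit(70, addr) does addr[1] |= 1UL << 6 */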
#define smp_mb__before_clear_bit() smp_mb()
#define smp_mb__after_clear_bit() smp_mb()
static __inline__ void set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
*addr |= mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
}
static __inline__ void __set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
*addr |= mask;
}
static __inline__ void clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
*addr &= ~mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
}
static __inline__ void __clear_bit(unsigned long nr, volatile void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
*addr &= ~mask;
}
static __inline__ void change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
*addr ^= mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
}
static __inline__ void __change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
*addr ^= mask;
}
static __inline__ int test_and_set_bit(int nr, void * address)
{
unsigned long mask;
@@ -28,14 +107,26 @@ static __inline__ int test_and_set_bit(int nr, void * address)
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
oldbit = (*addr & mask) ? 1 : 0;
*addr |= mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
return oldbit;
}
static __inline__ int __test_and_set_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
int oldbit;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
oldbit = (*addr & mask) ? 1 : 0;
*addr |= mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
return oldbit;
}
@@ -47,14 +138,26 @@ static __inline__ int test_and_clear_bit(int nr, void * address)
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
oldbit = (*addr & mask) ? 1 : 0;
*addr &= ~mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
return oldbit;
}
static __inline__ int __test_and_clear_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
int oldbit;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
oldbit = (*addr & mask) ? 1 : 0;
*addr &= ~mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
return oldbit;
}
@@ -66,20 +169,30 @@ static __inline__ int test_and_change_bit(int nr, void * address)
unsigned long flags;
addr += (nr >> SHIFT_PER_LONG);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
mask = 1L << CHOP_SHIFTCOUNT(nr);
SPIN_LOCK_IRQSAVE(ATOMIC_HASH(addr), flags);
oldbit = (*addr & mask) ? 1 : 0;
*addr ^= mask;
SPIN_UNLOCK_IRQRESTORE(ATOMIC_HASH(addr), flags);
return oldbit;
}
/* again, the read-only case doesn't have to do any locking */
static __inline__ int __test_and_change_bit(int nr, void * address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
int oldbit;
addr += (nr >> SHIFT_PER_LONG);
mask = 1L << CHOP_SHIFTCOUNT(nr);
oldbit = (*addr & mask) ? 1 : 0;
*addr ^= mask;
static __inline__ int test_bit(int nr, const volatile void *address)
return oldbit;
}
static __inline__ int test_bit(int nr, const void *address)
{
unsigned long mask;
unsigned long *addr = (unsigned long *) address;
@@ -90,21 +203,12 @@ static __inline__ int test_bit(int nr, const volatile void *address)
return !!(*addr & mask);
}
/* sparc does this, other arch's don't -- what's the right answer? XXX */
#define smp_mb__before_clear_bit() do { } while(0)
#define smp_mb__after_clear_bit() do { } while(0)
#define set_bit(nr,addr) ((void)test_and_set_bit(nr,addr))
#define clear_bit(nr,addr) ((void)test_and_clear_bit(nr,addr))
#define change_bit(nr,addr) ((void)test_and_change_bit(nr,addr))
/* XXX We'd need some binary search here */
extern __inline__ unsigned long ffz(unsigned long word)
{
unsigned long result;
result = 0;
while(word & 1) {
while (word & 1) {
result++;
word >>= 1;
}
@@ -114,13 +218,40 @@ extern __inline__ unsigned long ffz(unsigned long word)
#ifdef __KERNEL__
/**
* __ffs - find first bit in word.
* @word: The word to search
*
* Undefined if no bit exists, so code should check against 0 first.
*/
static __inline__ unsigned long __ffs(unsigned long word)
{
unsigned long result = 0;
while (!(word & 1UL)) {
result++;
word >>= 1;
}
return result;
}
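Note that ffz and __ffs are duals: for any word that has a zero bit, ffz(x) == __ffs(~x). A quick check of the identity (illustration only, assuming ffz and __ffs above are in scope):

#include <assert.h>

static void check_ffz_ffs_duality(void)
{
	assert(ffz(0x7UL) == 3);	/* lowest zero bit of ...0111 */
	assert(__ffs(~0x7UL) == 3);	/* lowest set bit of ...1000 */
}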
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
static __inline__ int ffs(int x)
{
if (!x)
return 0;
return __ffs((unsigned long)x);
}
/*
* fls: find last bit set.
*/
#define ffs(x) generic_ffs(x)
#define fls(x) generic_fls(x)
/*
* hweightN: returns the hamming weight (i.e. the number
@@ -131,6 +262,35 @@ extern __inline__ unsigned long ffz(unsigned long word)
#define hweight16(x) generic_hweight16(x)
#define hweight8(x) generic_hweight8(x)
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
#ifndef __LP64__
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(b[1]))
return __ffs(b[1]) + 32;
if (unlikely(b[2]))
return __ffs(b[2]) + 64;
if (b[3])
return __ffs(b[3]) + 96;
return __ffs(b[4]) + 128;
#else
if (unlikely(b[0]))
return __ffs(b[0]);
if (unlikely(((unsigned int)b[1])))
return __ffs(b[1]) + 64;
if (b[1] >> 32)
return __ffs(b[1] >> 32) + 96;
return __ffs(b[2]) + 128;
#endif
}
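A quick sanity check of the layout (user-space sketch assuming the 32-bit branch above, with sched_find_first_bit and __ffs in scope):

#include <assert.h>

static void check_sched_find_first_bit(void)
{
	/* 140-bit bitmap in five 32-bit longs; set only bit 101 */
	unsigned long b[5] = { 0, 0, 0, 0x20, 0 };

	/* b[3] bit 5 is overall bit 96 + 5 == 101 */
	assert(sched_find_first_bit(b) == 101);
}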
#endif /* __KERNEL__ */
/*
@@ -175,6 +335,44 @@ static __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long si
return result + ffz(tmp);
}
static __inline__ unsigned long find_next_bit(unsigned long *addr, unsigned long size, unsigned long offset)
{
unsigned long *p = addr + (offset >> 6);
unsigned long result = offset & ~(BITS_PER_LONG-1);
unsigned long tmp;
if (offset >= size)
return size;
size -= result;
offset &= (BITS_PER_LONG-1);
if (offset) {
tmp = *(p++);
tmp &= (~0UL << offset);
if (size < BITS_PER_LONG)
goto found_first;
if (tmp)
goto found_middle;
size -= BITS_PER_LONG;
result += BITS_PER_LONG;
}
while (size & ~(BITS_PER_LONG-1)) {
if ((tmp = *(p++)))
goto found_middle;
result += BITS_PER_LONG;
size -= BITS_PER_LONG;
}
if (!size)
return result;
tmp = *p;
found_first:
tmp &= (~0UL >> (BITS_PER_LONG - size));
if (tmp == 0UL) /* Are any bits set? */
return result + size; /* Nope. */
found_middle:
return result + __ffs(tmp);
}
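find_next_bit is normally used to walk every set bit of a bitmap; an illustrative loop (this for-each pattern is a sketch, not an API defined in this header):

static __inline__ void visit_set_bits(unsigned long *bitmap, unsigned long nbits)
{
	unsigned long bit;

	for (bit = find_next_bit(bitmap, nbits, 0);
	     bit < nbits;
	     bit = find_next_bit(bitmap, nbits, bit + 1)) {
		/* ... 'bit' is the index of a set bit ... */
	}
}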
#define _EXT2_HAVE_ASM_BITOPS_
#ifdef __KERNEL__
@@ -182,8 +380,13 @@ static __inline__ unsigned long find_next_zero_bit(void * addr, unsigned long si
* test_and_{set,clear}_bit guarantee atomicity without
* disabling interrupts.
*/
#ifdef __LP64__
#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x38, addr)
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x38, addr)
#else
#define ext2_set_bit(nr, addr) test_and_set_bit((nr) ^ 0x18, addr)
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr) ^ 0x18, addr)
#endif
#endif /* __KERNEL__ */
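The XOR constants implement ext2's little-endian bit numbering on a big-endian CPU: 0x38 (i.e. 7 << 3) flips the byte index within a 64-bit word and 0x18 (3 << 3) within a 32-bit word, while the low three bits (the bit within the byte) pass through unchanged. A worked example of the 64-bit case (arithmetic only, for illustration):

/* ext2 bit 0 lives in disk byte 0; 0 ^ 0x38 == 56, and native bits
 * 56..63 are exactly byte 0 of a big-endian 64-bit load.
 * ext2 bit 10 (disk byte 1, bit 2): 10 ^ 0x38 == 50 == 6*8 + 2, and
 * native bits 48..55 are byte 1 of the big-endian word -- the same
 * byte, same bit-within-byte. */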
@@ -239,8 +442,9 @@ extern __inline__ unsigned long ext2_find_next_zero_bit(void *addr,
}
/* Bitmap functions for the minix filesystem. */
#define minix_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_and_set_bit(nr,addr) ext2_set_bit(nr,addr)
#define minix_set_bit(nr,addr) ((void)ext2_set_bit(nr,addr))
#define minix_test_and_clear_bit(nr,addr) ext2_clear_bit(nr,addr)
#define minix_test_bit(nr,addr) ext2_test_bit(nr,addr)
#define minix_find_first_zero_bit(addr,size) ext2_find_first_zero_bit(addr,size)
@@ -5,27 +5,18 @@
#ifndef __ARCH_PARISC_CACHE_H
#define __ARCH_PARISC_CACHE_H
#include <linux/config.h>
#ifndef __ASSEMBLY__
/*
** XXX FIXME : L1_CACHE_BYTES (cacheline size) should be a boot time thing.
**
** 32-bit on PA2.0 is not covered well by the #ifdef __LP64__ below.
** PA2.0 processors have 64-byte cachelines.
**
** The issue is mostly cacheline ping-ponging on SMP boxes.
** To avoid this, code should define stuff to be per CPU on cacheline
** aligned boundaries. This can make a 2x or more difference in perf
** depending on how badly the thrashing is.
**
** We don't need to worry about I/O since all PA2.0 boxes (except T600)
** are I/O coherent. That means flushing less than you needed to generally
** doesn't matter - the I/O MMU will read/modify/write the cacheline.
**
** (Digression: it is possible to program I/O MMU's to not first read
** a cacheline for inbound data - ie just grab ownership and start writing.
** While it improves I/O throughput, you gotta know the device driver
** is well behaved and can deal with the issues.)
*/
#if defined(__LP64__)
* PA 2.0 processors have 64-byte cachelines; PA 1.1 processors have
* 32-byte cachelines. The default configuration is not for SMP anyway,
* so if you're building for SMP, you should select the appropriate
* processor type. There is a potential livelock danger when running
* a machine with this value set too small, but it's more probable you'll
* just ruin performance.
*/
#ifdef CONFIG_PA20
#define L1_CACHE_BYTES 64
#else
#define L1_CACHE_BYTES 32
@@ -38,22 +29,47 @@
#define __cacheline_aligned __attribute__((__aligned__(L1_CACHE_BYTES)))
extern void init_cache(void); /* initializes cache-flushing */
extern void flush_data_cache(void); /* flushes data-cache only */
extern void flush_instruction_cache(void);/* flushes code-cache only */
extern void flush_all_caches(void); /* flushes code and data-cache */
extern void flush_data_cache_local(void); /* flushes local data-cache only */
extern void flush_instruction_cache_local(void); /* flushes local code-cache only */
#ifdef CONFIG_SMP
extern void flush_data_cache(void); /* flushes data-cache only (all processors) */
#else
#define flush_data_cache flush_data_cache_local
#define flush_instruction_cache flush_instruction_cache_local
#endif
extern void parisc_cache_init(void); /* initializes cache-flushing */
extern void flush_all_caches(void); /* flush everything (tlb & cache) */
extern int get_cache_info(char *);
extern void flush_user_icache_range_asm(unsigned long, unsigned long);
extern void flush_kernel_icache_range_asm(unsigned long, unsigned long);
extern void flush_user_dcache_range_asm(unsigned long, unsigned long);
extern void flush_kernel_dcache_range_asm(unsigned long, unsigned long);
extern void flush_kernel_dcache_page(void *);
extern void flush_kernel_icache_page(void *);
extern void disable_sr_hashing(void); /* turns off space register hashing */
extern void disable_sr_hashing_asm(int); /* low level support for above */
extern void free_sid(unsigned long);
unsigned long alloc_sid(void);
extern struct pdc_cache_info cache_info;
struct seq_file;
extern void show_cache_info(struct seq_file *m);
#define fdce(addr) asm volatile("fdce 0(%0)" : : "r" (addr))
#define fice(addr) asm volatile("fice 0(%%sr1,%0)" : : "r" (addr))
extern int split_tlb;
extern int dcache_stride;
extern int icache_stride;
extern struct pdc_cache_info cache_info;
#define pdtlbe(addr) asm volatile("pdtlbe 0(%%sr1,%0)" : : "r" (addr))
#define pdtlb(addr) asm volatile("pdtlb 0(%%sr1,%0)" : : "r" (addr));
#define pitlb(addr) asm volatile("pitlb 0(%%sr1,%0)" : : "r" (addr));
#define pdtlb_kernel(addr) asm volatile("pdtlb 0(%0)" : : "r" (addr));
#define pitlbe(addr) asm volatile("pitlbe 0(%%sr1,%0)" : : "r" (addr))
#define kernel_fdc(addr) asm volatile("fdc 0(%%sr0, %0)" : : "r" (addr))
#endif /* ! __ASSEMBLY__ */
/* Classes of processor wrt: disabling space register hashing */
#define SRHASH_PCXST 0 /* pcxs, pcxt, pcxt_ */
#define SRHASH_PCXL 1 /* pcxl */
#define SRHASH_PA20 2 /* pcxu, pcxu_, pcxw, pcxw_ */
#endif
#ifndef _PARISC_CACHEFLUSH_H
#define _PARISC_CACHEFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
* Unfortunately, that doesn't apply to PA-RISC. */
/* Cache flush operations */
#ifdef CONFIG_SMP
#define flush_cache_mm(mm) flush_cache_all()
#else
#define flush_cache_mm(mm) flush_cache_all_local()
#endif
#define flush_kernel_dcache_range(start,size) \
flush_kernel_dcache_range_asm((start), (start)+(size));
static inline void
flush_page_to_ram(struct page *page)
{
}
extern void flush_cache_all_local(void);
#ifdef CONFIG_SMP
static inline void flush_cache_all(void)
{
smp_call_function((void (*)(void *))flush_cache_all_local, NULL, 1, 1);
flush_cache_all_local();
}
#else
#define flush_cache_all flush_cache_all_local
#endif
/* The following value needs to be tuned and probably scaled with the
* cache size.
*/
#define FLUSH_THRESHOLD 0x80000
static inline void
flush_user_dcache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
flush_user_dcache_range_asm(start,end);
#else
if ((end - start) < FLUSH_THRESHOLD)
flush_user_dcache_range_asm(start,end);
else
flush_data_cache();
#endif
}
static inline void
flush_user_icache_range(unsigned long start, unsigned long end)
{
#ifdef CONFIG_SMP
flush_user_icache_range_asm(start,end);
#else
if ((end - start) < FLUSH_THRESHOLD)
flush_user_icache_range_asm(start,end);
else
flush_instruction_cache();
#endif
}
static inline void flush_dcache_page(struct page *page)
{
if (page->mapping && list_empty(&page->mapping->i_mmap) &&
list_empty(&page->mapping->i_mmap_shared)) {
set_bit(PG_dcache_dirty, &page->flags);
} else {
flush_kernel_dcache_page(page_address(page));
}
}
#define flush_icache_page(vma,page) do { flush_kernel_dcache_page(page_address(page)); flush_kernel_icache_page(page_address(page)); } while (0)
#define flush_icache_range(s,e) do { flush_kernel_dcache_range_asm(s,e); flush_kernel_icache_range_asm(s,e); } while (0)
#define flush_icache_user_range(vma, page, addr, len) \
flush_icache_page((vma), (page))
static inline void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
int sr3;
if (!vma->vm_mm->context) {
BUG();
return;
}
sr3 = mfsp(3);
if (vma->vm_mm->context == sr3) {
flush_user_dcache_range(start,end);
flush_user_icache_range(start,end);
} else {
flush_cache_all();
}
}
static inline void
flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr)
{
int sr3;
if (!vma->vm_mm->context) {
BUG();
return;
}
sr3 = mfsp(3);
if (vma->vm_mm->context == sr3) {
flush_user_dcache_range(vmaddr,vmaddr + PAGE_SIZE);
if (vma->vm_flags & VM_EXEC)
flush_user_icache_range(vmaddr,vmaddr + PAGE_SIZE);
} else {
if (vma->vm_flags & VM_EXEC)
flush_cache_all();
else
flush_data_cache();
}
}
#endif
@@ -16,10 +16,9 @@
extern unsigned int csum_partial(const unsigned char *, int, unsigned int);
/*
* the same as csum_partial, but copies from src while it
* checksums
* The same as csum_partial, but copies from src while it checksums.
*
* here even more important to align src and dst on a 32-bit (or even
* Here even more important to align src and dst on a 32-bit (or even
* better 64-bit) boundary
*/
extern unsigned int csum_partial_copy_nocheck(const char *, char *, int, unsigned int);
@@ -28,7 +27,7 @@ extern unsigned int csum_partial_copy_nocheck(const char *, char *, int, unsigne
* this is a new version of the above that records errors it finds in *errp,
* but continues and zeros the rest of the buffer.
*/
unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
extern unsigned int csum_partial_copy_from_user(const char *src, char *dst, int len, unsigned int sum, int *errp);
/*
* Optimized for IP headers, which always checksum on 4 octet boundaries.
@@ -40,32 +39,31 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
unsigned int sum;
__asm__ __volatile__ ("
ldws,ma 4(%1), %0
addi -4, %2, %2
comib,>= 0, %2, 2f
ldws,ma 4(%1), %%r19
add %0, %%r19, %0
ldws,ma 4(%1), %%r19
addc %0, %%r19, %0
ldws,ma 4(%1), %%r19
addc %0, %%r19, %0
1: ldws,ma 4(%1), %%r19
addib,<> -1, %2, 1b
addc %0, %%r19, %0
addc %0, %%r0, %0
zdepi -1, 31, 16, %%r19
and %0, %%r19, %%r20
extru %0, 15, 16, %%r21
add %%r20, %%r21, %0
and %0, %%r19, %%r20
extru %0, 15, 16, %%r21
add %%r20, %%r21, %0
subi -1, %0, %0
2:
"
__asm__ __volatile__ (
" ldws,ma 4(%1), %0\n"
" addi -4, %2, %2\n"
" comib,>= 0, %2, 2f\n"
"\n"
" ldws,ma 4(%1), %%r19\n"
" add %0, %%r19, %0\n"
" ldws,ma 4(%1), %%r19\n"
" addc %0, %%r19, %0\n"
" ldws,ma 4(%1), %%r19\n"
" addc %0, %%r19, %0\n"
"1: ldws,ma 4(%1), %%r19\n"
" addib,<> -1, %2, 1b\n"
" addc %0, %%r19, %0\n"
" addc %0, %%r0, %0\n"
"\n"
" zdepi -1, 31, 16, %%r19\n"
" and %0, %%r19, %%r20\n"
" extru %0, 15, 16, %%r21\n"
" add %%r20, %%r21, %0\n"
" and %0, %%r19, %%r20\n"
" extru %0, 15, 16, %%r21\n"
" add %%r20, %%r21, %0\n"
" subi -1, %0, %0\n"
"2:\n"
: "=r" (sum), "=r" (iph), "=r" (ihl)
: "1" (iph), "2" (ihl)
: "r19", "r20", "r21" );
@@ -78,9 +76,12 @@ static inline unsigned short ip_fast_csum(unsigned char * iph,
*/
static inline unsigned int csum_fold(unsigned int sum)
{
sum = (sum & 0xffff) + (sum >> 16);
sum = (sum & 0xffff) + (sum >> 16);
return ~sum;
/* add the two 16-bit halves of sum with the halves swapped;
a carry from adding the two 16-bit halves will propagate
from the lower half into the upper half, giving us the
correct sum in the upper half. */
sum += (sum << 16) + (sum >> 16);
return (~sum) >> 16;
}
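A worked example of the fold (a stand-alone restatement of the routine above, runnable in user space):

#include <stdio.h>

static unsigned int fold(unsigned int sum)
{
	sum += (sum << 16) + (sum >> 16);
	return (~sum) >> 16;
}

int main(void)
{
	/* halves of 0x00012345 sum to 0x0001 + 0x2345 == 0x2346;
	 * the folded checksum is ~0x2346 & 0xffff == 0xdcb9 */
	printf("%#x\n", fold(0x00012345));	/* prints 0xdcb9 */
	return 0;
}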
static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
@@ -89,11 +90,11 @@ static inline unsigned long csum_tcpudp_nofold(unsigned long saddr,
unsigned short proto,
unsigned int sum)
{
__asm__("
add %1, %0, %0
addc %2, %0, %0
addc %3, %0, %0
addc %%r0, %0, %0 "
__asm__(
" add %1, %0, %0\n"
" addc %2, %0, %0\n"
" addc %3, %0, %0\n"
" addc %%r0, %0, %0\n"
: "=r" (sum)
: "r" (daddr), "r"(saddr), "r"((proto<<16)+len), "0"(sum));
return sum;
@@ -120,6 +121,7 @@ static inline unsigned short ip_compute_csum(unsigned char * buf, int len) {
return csum_fold (csum_partial(buf, len, 0));
}
#define _HAVE_ARCH_IPV6_CSUM
static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
struct in6_addr *daddr,
@@ -127,7 +129,62 @@ static __inline__ unsigned short int csum_ipv6_magic(struct in6_addr *saddr,
unsigned short proto,
unsigned int sum)
{
BUG();
__asm__ __volatile__ (
#if BITS_PER_LONG > 32
/*
** We can execute two loads and two adds per cycle on PA 8000.
** But add insns get serialized waiting for the carry bit.
** Try to keep 4 registers with "live" values ahead of the ALU.
*/
" ldd,ma 8(%1), %%r19\n" /* get 1st saddr word */
" ldd,ma 8(%2), %%r20\n" /* get 1st daddr word */
" add %8, %3, %3\n"/* add 16-bit proto + len */
" add %%r19, %0, %0\n"
" ldd,ma 8(%1), %%r21\n" /* 2cd saddr */
" ldd,ma 8(%2), %%r22\n" /* 2cd daddr */
" add,dc %%r20, %0, %0\n"
" add,dc %%r21, %0, %0\n"
" add,dc %%r22, %0, %0\n"
" add,dc %3, %0, %0\n" /* fold in proto+len | carry bit */
" extrd,u %0, 31, 32, %%r19\n" /* copy upper half down */
" depdi 0, 31, 32, %0\n" /* clear upper half */
" add %%r19, %0, %0\n" /* fold into 32-bits */
" addc 0, %0, %0\n" /* add carry */
#else
/*
** For PA 1.x, the insn order doesn't matter as much.
** The insn stream is serialized on the carry bit here too: each
** addc depends on the result of the previous operation.
*/
" ldw,ma 4(%1), %%r19\n" /* get 1st saddr word */
" ldw,ma 4(%2), %%r20\n" /* get 1st daddr word */
" add %8, %3, %3\n" /* add 16-bit proto + len */
" add %%r19, %0, %0\n"
" ldw,ma 4(%1), %%r21\n" /* 2cd saddr */
" addc %%r20, %0, %0\n"
" ldw,ma 4(%2), %%r22\n" /* 2cd daddr */
" addc %%r21, %0, %0\n"
" ldw,ma 4(%1), %%r19\n" /* 3rd saddr */
" addc %%r22, %0, %0\n"
" ldw,ma 4(%2), %%r20\n" /* 3rd daddr */
" addc %%r19, %0, %0\n"
" ldw,ma 4(%1), %%r21\n" /* 4th saddr */
" addc %%r20, %0, %0\n"
" ldw,ma 4(%2), %%r22\n" /* 4th daddr */
" addc %%r21, %0, %0\n"
" addc %%r22, %0, %0\n"
" addc %3, %0, %0\n" /* fold in proto+len, catch carry */
#endif
: "=r" (sum), "=r" (saddr), "=r" (daddr), "=r" (len)
: "0" (sum), "1" (saddr), "2" (daddr), "3" (len), "r" (proto)
: "r19", "r20", "r21", "r22");
return csum_fold(sum);
}
#ifndef _PARISC_CURRENT_H
#define _PARISC_CURRENT_H
#include <asm/processor.h>
#include <asm/thread_info.h>
struct task_struct;
static inline struct task_struct * get_current(void)
{
struct task_struct *current;
asm("copy 30,%0" : "=r" (current));
return (struct task_struct *)((long) current & ~(THREAD_SIZE-1));
return current_thread_info()->task;
}
#define current get_current()
@@ -11,13 +11,11 @@
* Delay routines
*/
extern unsigned long loops_per_sec;
static __inline__ void __delay(unsigned long loops) {
asm volatile(
" .balignl 64,0x34000034
addib,UV -1,%0,.
nop"
" .balignl 64,0x34000034\n"
" addib,UV -1,%0,.\n"
" nop\n"
: "=r" (loops) : "0" (loops));
}
/* $Id: dma.h,v 1.1 2002/07/20 15:52:25 rhirst Exp $
* linux/include/asm/dma.h: Defines for using and allocating dma channels.
* Written by Hennus Bergman, 1992.
* High DMA channel support & info by Hannu Savolainen
* and John Boyd, Nov. 1992.
* (c) Copyright 2000, Grant Grundler
*/
#ifndef _ASM_DMA_H
#define _ASM_DMA_H
#include <linux/config.h>
#include <asm/io.h> /* need byte IO */
#include <asm/system.h>
#define dma_outb outb
#define dma_inb inb
/*
** DMA_CHUNK_SIZE is used by the SCSI mid-layer to break up
** (or rather not merge) DMAs into manageable chunks.
** On parisc, this is more of a software/tuning constraint
** than a HW one. I/O MMU allocation algorithms can be
** faster with smaller sizes (to some degree).
*/
#define DMA_CHUNK_SIZE (BITS_PER_LONG*PAGE_SIZE)
/* The maximum address that we can perform a DMA transfer to on this platform
** New dynamic DMA interfaces should obsolete this....
*/
#define MAX_DMA_ADDRESS (~0UL)
/*
** We don't have DMA channels... well V-class does but the
** Dynamic DMA Mapping interface will support them... right? :^)
** Note: this is not relevant right now for PA-RISC, but we cannot
** leave this as undefined because some things (e.g. sound)
** won't compile :-(
*/
#define MAX_DMA_CHANNELS 8
#define DMA_MODE_READ 1
#define DMA_MODE_WRITE 2
#define DMA_AUTOINIT 0x10
/* 8237 DMA controllers */
#define IO_DMA1_BASE 0x00 /* 8 bit slave DMA, channels 0..3 */
#define IO_DMA2_BASE 0xC0 /* 16 bit master DMA, ch 4(=slave input)..7 */
/* DMA controller registers */
#define DMA1_CMD_REG 0x08 /* command register (w) */
#define DMA1_STAT_REG 0x08 /* status register (r) */
#define DMA1_REQ_REG 0x09 /* request register (w) */
#define DMA1_MASK_REG 0x0A /* single-channel mask (w) */
#define DMA1_MODE_REG 0x0B /* mode register (w) */
#define DMA1_CLEAR_FF_REG 0x0C /* clear pointer flip-flop (w) */
#define DMA1_TEMP_REG 0x0D /* Temporary Register (r) */
#define DMA1_RESET_REG 0x0D /* Master Clear (w) */
#define DMA1_CLR_MASK_REG 0x0E /* Clear Mask */
#define DMA1_MASK_ALL_REG 0x0F /* all-channels mask (w) */
#define DMA1_EXT_MODE_REG (0x400 | DMA1_MODE_REG)
#define DMA2_CMD_REG 0xD0 /* command register (w) */
#define DMA2_STAT_REG 0xD0 /* status register (r) */
#define DMA2_REQ_REG 0xD2 /* request register (w) */
#define DMA2_MASK_REG 0xD4 /* single-channel mask (w) */
#define DMA2_MODE_REG 0xD6 /* mode register (w) */
#define DMA2_CLEAR_FF_REG 0xD8 /* clear pointer flip-flop (w) */
#define DMA2_TEMP_REG 0xDA /* Temporary Register (r) */
#define DMA2_RESET_REG 0xDA /* Master Clear (w) */
#define DMA2_CLR_MASK_REG 0xDC /* Clear Mask */
#define DMA2_MASK_ALL_REG 0xDE /* all-channels mask (w) */
#define DMA2_EXT_MODE_REG (0x400 | DMA2_MODE_REG)
extern spinlock_t dma_spin_lock;
static __inline__ unsigned long claim_dma_lock(void)
{
unsigned long flags;
spin_lock_irqsave(&dma_spin_lock, flags);
return flags;
}
static __inline__ void release_dma_lock(unsigned long flags)
{
spin_unlock_irqrestore(&dma_spin_lock, flags);
}
/* Get DMA residue count. After a DMA transfer, this
* should return zero. Reading this while a DMA transfer is
* still in progress will return unpredictable results.
* If called before the channel has been used, it may return 1.
* Otherwise, it returns the number of _bytes_ left to transfer.
*
* Assumes DMA flip-flop is clear.
*/
static __inline__ int get_dma_residue(unsigned int dmanr)
{
unsigned int io_port = (dmanr<=3)? ((dmanr&3)<<1) + 1 + IO_DMA1_BASE
: ((dmanr&3)<<2) + 2 + IO_DMA2_BASE;
/* using short to get 16-bit wrap around */
unsigned short count;
count = 1 + dma_inb(io_port);
count += dma_inb(io_port) << 8;
return (dmanr<=3)? count : (count<<1);
}
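Callers are expected to hold the DMA spinlock around residue reads, since the two dma_inb() calls depend on the flip-flop state; a typical pattern (illustrative; FLOPPY_DMA is assumed to come from the floppy driver):

unsigned long flags = claim_dma_lock();
int residue = get_dma_residue(FLOPPY_DMA);	/* FLOPPY_DMA: assumed defined elsewhere */
release_dma_lock(flags);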
/* enable/disable a specific DMA channel */
static __inline__ void enable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
if (dmanr<=3)
dma_outb(dmanr, DMA1_MASK_REG);
else
dma_outb(dmanr & 3, DMA2_MASK_REG);
#endif
}
static __inline__ void disable_dma(unsigned int dmanr)
{
#ifdef CONFIG_SUPERIO
if (dmanr<=3)
dma_outb(dmanr | 4, DMA1_MASK_REG);
else
dma_outb((dmanr & 3) | 4, DMA2_MASK_REG);
#endif
}
/* Clear the 'DMA Pointer Flip Flop'.
* Write 0 for LSB/MSB, 1 for MSB/LSB access.
* Use this once to initialize the FF to a known state.
* After that, keep track of it. :-)
* --- In order to do that, the DMA routines below should ---
* --- only be used while holding the DMA lock ! ---
*/
static __inline__ void clear_dma_ff(unsigned int dmanr)
{
}
/* set mode (above) for a specific DMA channel */
static __inline__ void set_dma_mode(unsigned int dmanr, char mode)
{
}
/* Set only the page register bits of the transfer address.
* This is used for successive transfers when we know the contents of
* the lower 16 bits of the DMA current address register, but a 64k boundary
* may have been crossed.
*/
static __inline__ void set_dma_page(unsigned int dmanr, char pagenr)
{
}
/* Set transfer address & page bits for specific DMA channel.
* Assumes dma flipflop is clear.
*/
static __inline__ void set_dma_addr(unsigned int dmanr, unsigned int a)
{
}
/* Set transfer size (max 64k for DMA1..3, 128k for DMA5..7) for
* a specific DMA channel.
* You must ensure the parameters are valid.
* NOTE: from a manual: "the number of transfers is one more
* than the initial word count"! This is taken into account.
* Assumes dma flip-flop is clear.
* NOTE 2: "count" represents _bytes_ and must be even for channels 5-7.
*/
static __inline__ void set_dma_count(unsigned int dmanr, unsigned int count)
{
}
/* These are in kernel/dma.c: */
extern int request_dma(unsigned int dmanr, const char * device_id); /* reserve a DMA channel */
extern void free_dma(unsigned int dmanr); /* release it again */
extern int get_dma_list(char *buf); /* proc/dma support */
#ifdef CONFIG_PCI
extern int isa_dma_bridge_buggy;
#else
#define isa_dma_bridge_buggy (0)
#endif
#endif /* _ASM_DMA_H */
/*
* eisa_bus.h interface between the eisa BA driver and the bus enumerator
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 2002 Daniel Engstrom <5116@telia.com>
*
*/
#ifndef ASM_EISA_H
#define ASM_EISA_H
extern void eisa_make_irq_level(int num);
extern void eisa_make_irq_edge(int num);
extern int eisa_enumerator(unsigned long eeprom_addr,
struct resource *io_parent,
struct resource *mem_parent);
extern int eisa_eeprom_init(unsigned long addr);
#endif
/*
* eisa_eeprom.h - provide support for EISA adapters in PA-RISC machines
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* Copyright (c) 2001, 2002 Daniel Engstrom <5116@telia.com>
*
*/
#ifndef ASM_EISA_EEPROM_H
#define ASM_EISA_EEPROM_H
#define HPEE_MAX_LENGTH 0x2000 /* maximum eeprom length */
#define HPEE_SLOT_INFO(slot) (20+(48*slot))
struct eeprom_header
{
u_int32_t num_writes; /* number of writes */
u_int8_t flags; /* flags, usage? */
u_int8_t ver_maj;
u_int8_t ver_min;
u_int8_t num_slots; /* number of EISA slots in system */
u_int16_t csum; /* checksum, I don't know how to calculate this */
u_int8_t pad[10];
} __attribute__ ((packed));
struct eeprom_eisa_slot_info
{
u_int32_t eisa_slot_id;
u_int32_t config_data_offset;
u_int32_t num_writes;
u_int16_t csum;
u_int16_t num_functions;
u_int16_t config_data_length;
/* bits 0..3 are the duplicate slot id */
#define HPEE_SLOT_INFO_EMBEDDED 0x10
#define HPEE_SLOT_INFO_VIRTUAL 0x20
#define HPEE_SLOT_INFO_NO_READID 0x40
#define HPEE_SLOT_INFO_DUPLICATE 0x80
u_int8_t slot_info;
#define HPEE_SLOT_FEATURES_ENABLE 0x01
#define HPEE_SLOT_FEATURES_IOCHK 0x02
#define HPEE_SLOT_FEATURES_CFG_INCOMPLETE 0x80
u_int8_t slot_features;
u_int8_t ver_min;
u_int8_t ver_maj;
#define HPEE_FUNCTION_INFO_HAVE_TYPE 0x01
#define HPEE_FUNCTION_INFO_HAVE_MEMORY 0x02
#define HPEE_FUNCTION_INFO_HAVE_IRQ 0x04
#define HPEE_FUNCTION_INFO_HAVE_DMA 0x08
#define HPEE_FUNCTION_INFO_HAVE_PORT 0x10
#define HPEE_FUNCTION_INFO_HAVE_PORT_INIT 0x20
/* I think there are two slightly different
* versions of the function_info field:
* one in the fixed header and one optional
* in the parsed slot data area */
#define HPEE_FUNCTION_INFO_HAVE_FUNCTION 0x01
#define HPEE_FUNCTION_INFO_F_DISABLED 0x80
#define HPEE_FUNCTION_INFO_CFG_FREE_FORM 0x40
u_int8_t function_info;
#define HPEE_FLAG_BOARD_IS_ISA 0x01 /* flag and minor version for isa board */
u_int8_t flags;
u_int8_t pad[24];
} __attribute__ ((packed));
#define HPEE_MEMORY_MAX_ENT 9
/* memory descriptor: byte 0 */
#define HPEE_MEMORY_WRITABLE 0x01
#define HPEE_MEMORY_CACHABLE 0x02
#define HPEE_MEMORY_TYPE_MASK 0x18
#define HPEE_MEMORY_TYPE_SYS 0x00
#define HPEE_MEMORY_TYPE_EXP 0x08
#define HPEE_MEMORY_TYPE_VIR 0x10
#define HPEE_MEMORY_TYPE_OTH 0x18
#define HPEE_MEMORY_SHARED 0x20
#define HPEE_MEMORY_MORE 0x80
/* memory descriptor: byte 1 */
#define HPEE_MEMORY_WIDTH_MASK 0x03
#define HPEE_MEMORY_WIDTH_BYTE 0x00
#define HPEE_MEMORY_WIDTH_WORD 0x01
#define HPEE_MEMORY_WIDTH_DWORD 0x02
#define HPEE_MEMORY_DECODE_MASK 0x0c
#define HPEE_MEMORY_DECODE_20BITS 0x00
#define HPEE_MEMORY_DECODE_24BITS 0x04
#define HPEE_MEMORY_DECODE_32BITS 0x08
/* bytes 2 and 3 are a 16-bit LE value
* containing the memory size in kilobytes */
/* bytes 4, 5 and 6 are a 24-bit LE value
* containing the memory base address */
#define HPEE_IRQ_MAX_ENT 7
/* Interrupt entry: byte 0 */
#define HPEE_IRQ_CHANNEL_MASK 0xf
#define HPEE_IRQ_TRIG_LEVEL 0x20
#define HPEE_IRQ_MORE 0x80
/* byte 1 seems to be unused */
#define HPEE_DMA_MAX_ENT 4
/* dma entry: byte 0 */
#define HPEE_DMA_CHANNEL_MASK 7
#define HPEE_DMA_SIZE_MASK 0xc
#define HPEE_DMA_SIZE_BYTE 0x0
#define HPEE_DMA_SIZE_WORD 0x4
#define HPEE_DMA_SIZE_DWORD 0x8
#define HPEE_DMA_SHARED 0x40
#define HPEE_DMA_MORE 0x80
/* dma entry: byte 1 */
#define HPEE_DMA_TIMING_MASK 0x30
#define HPEE_DMA_TIMING_ISA 0x0
#define HPEE_DMA_TIMING_TYPEA 0x10
#define HPEE_DMA_TIMING_TYPEB 0x20
#define HPEE_DMA_TIMING_TYPEC 0x30
#define HPEE_PORT_MAX_ENT 20
/* port entry byte 0 */
#define HPEE_PORT_SIZE_MASK 0x1f
#define HPEE_PORT_SHARED 0x40
#define HPEE_PORT_MORE 0x80
/* bytes 1 and 2 are a 16-bit LE value
* containing the start port number */
#define HPEE_PORT_INIT_MAX_LEN 60 /* in bytes here */
/* port init entry byte 0 */
#define HPEE_PORT_INIT_WIDTH_MASK 0x3
#define HPEE_PORT_INIT_WIDTH_BYTE 0x0
#define HPEE_PORT_INIT_WIDTH_WORD 0x1
#define HPEE_PORT_INIT_WIDTH_DWORD 0x2
#define HPEE_PORT_INIT_MASK 0x4
#define HPEE_PORT_INIT_MORE 0x80
#define HPEE_SELECTION_MAX_ENT 26
#define HPEE_TYPE_MAX_LEN 80
#endif
@@ -9,19 +9,13 @@
#define EM_PARISC 15
#define ELF_NGREG 32
#define ELF_NFPREG 32
typedef unsigned long elf_greg_t;
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_CORE_COPY_REGS(gregs, regs) \
memcpy(gregs, regs, \
sizeof(struct pt_regs) < sizeof(elf_gregset_t)? \
sizeof(struct pt_regs): sizeof(elf_gregset_t));
/*
* The following definitions are those for 32-bit ELF binaries on a 32-bit kernel
* and for 64-bit binaries on a 64-bit kernel. To run 32-bit binaries on a 64-bit
* kernel, arch/parisc64/kernel/binfmt_elf32.c defines these macros appropriately
* and then #includes binfmt_elf.c, which then includes this file.
*/
#ifndef ELF_CLASS
/*
* This is used to ensure we don't load something for the wrong architecture.
@@ -30,16 +24,84 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
* the following macros are for the default case. However, for the 64
* bit kernel we also support 32 bit parisc binaries. To do that
* arch/parisc64/kernel/binfmt_elf32.c defines its own set of these
* macros, and then if includes fs/binfmt_elf.c to provide an alternate
* macros, and then it includes fs/binfmt_elf.c to provide an alternate
* elf binary handler for 32 bit binaries (on the 64 bit kernel).
*/
#ifdef __LP64__
#define ELF_CLASS ELFCLASS64
#else
#define ELF_CLASS ELFCLASS32
#endif
typedef unsigned long elf_greg_t;
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM ("PARISC\0" /*+((boot_cpu_data.x86-3)*5) */)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) \
current->personality = PER_LINUX
#endif
/*
* Fill in general registers in a core dump. This saves pretty
* much the same registers as hp-ux, although in a different order.
* Registers marked # below are not currently saved in pt_regs, so
* we use their current values here.
*
* gr0..gr31
* sr0..sr7
* iaoq0..iaoq1
* iasq0..iasq1
* cr11 (sar)
* cr19 (iir)
* cr20 (isr)
* cr21 (ior)
* # cr22 (ipsw)
* # cr0 (recovery counter)
* # cr24..cr31 (temporary registers)
* # cr8,9,12,13 (protection IDs)
* # cr10 (scr/ccr)
* # cr15 (ext int enable mask)
*
*/
#define ELF_CORE_COPY_REGS(dst, pt) \
memset(dst, 0, sizeof(dst)); /* don't leak any "random" bits */ \
memcpy(dst + 0, pt->gr, 32 * sizeof(elf_greg_t)); \
memcpy(dst + 32, pt->sr, 8 * sizeof(elf_greg_t)); \
memcpy(dst + 40, pt->iaoq, 2 * sizeof(elf_greg_t)); \
memcpy(dst + 42, pt->iasq, 2 * sizeof(elf_greg_t)); \
dst[44] = pt->sar; dst[45] = pt->iir; \
dst[46] = pt->isr; dst[47] = pt->ior; \
dst[48] = mfctl(22); dst[49] = mfctl(0); \
dst[50] = mfctl(24); dst[51] = mfctl(25); \
dst[52] = mfctl(26); dst[53] = mfctl(27); \
dst[54] = mfctl(28); dst[55] = mfctl(29); \
dst[56] = mfctl(30); dst[57] = mfctl(31); \
dst[58] = mfctl( 8); dst[59] = mfctl( 9); \
dst[60] = mfctl(12); dst[61] = mfctl(13); \
dst[62] = mfctl(10); dst[63] = mfctl(15);
#endif /* ! ELF_CLASS */
#define ELF_NGREG 80 /* We only need 64 at present, but leave space
for expansion. */
typedef elf_greg_t elf_gregset_t[ELF_NGREG];
#define ELF_NFPREG 32
typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
struct pt_regs; /* forward declaration... */
#define elf_check_arch(x) ((x)->e_machine == EM_PARISC && (x)->e_ident[EI_CLASS] == ELF_CLASS)
/*
@@ -80,18 +142,4 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#define ELF_HWCAP 0
/* (boot_cpu_data.x86_capability) */
/* This yields a string that ld.so will use to load implementation
specific libraries for optimization. This is more specific in
intent than poking at uname or /proc/cpuinfo.
For the moment, we have only optimizations for the Intel generations,
but that could change... */
#define ELF_PLATFORM ("PARISC\0" /*+((boot_cpu_data.x86-3)*5) */)
#ifdef __KERNEL__
#define SET_PERSONALITY(ex, ibcs2) \
current->personality = PER_LINUX
#endif
#endif
@@ -24,6 +24,7 @@
#define O_DIRECT 00040000 /* direct disk access hint - currently ignored */
#define O_DIRECTORY 00010000 /* must be a directory */
#define O_NOFOLLOW 00000200 /* don't follow links */
#define O_INVISIBLE 04000000 /* invisible I/O, for DMAPI/XDSM */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get f_flags */
#ifndef _ASM_FIXMAP_H
#define _ASM_FIXMAP_H
#define FIXADDR_TOP (0xffffe000UL)
#define FIXADDR_SIZE (0 << PAGE_SHIFT)
#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
/*
* Allocate an 8 MB temporary mapping area for copy_user_page/clear_user_page.
* This area needs to be aligned on an 8 MB boundary.
*/
#define TMPALIAS_MAP_START (__PAGE_OFFSET - 0x01000000)
#define FIXADDR_START ((unsigned long)TMPALIAS_MAP_START)
#endif
/*
* Architecture specific parts of the Floppy driver
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 1995
*/
#ifndef __ASM_PARISC_FLOPPY_H
#define __ASM_PARISC_FLOPPY_H
#include <linux/vmalloc.h>
/*
* The DMA channel used by the floppy controller cannot access data at
* addresses >= 16MB
*
* Went back to the 1MB limit, as some people had problems with the floppy
* driver otherwise. It doesn't matter much for performance anyway, as most
* floppy accesses go through the track buffer.
*/
#define _CROSS_64KB(a,s,vdma) \
(!vdma && ((unsigned long)(a)/K_64 != ((unsigned long)(a) + (s) - 1) / K_64))
#define CROSS_64KB(a,s) _CROSS_64KB(a,s,use_virtual_dma & 1)
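For example, a 32-byte buffer at 0xfff0 spans 0xfff0..0x1000f and so crosses the first 64 KB boundary, while the same buffer at 0x8000 does not. A quick check of the macro (illustration only, assuming K_64 == 0x10000):

#include <assert.h>

static void check_cross_64kb(void)
{
	assert(_CROSS_64KB(0xfff0, 0x20, 0));	/* 0xfff0/K_64 == 0, 0x1000f/K_64 == 1 */
	assert(!_CROSS_64KB(0x8000, 0x20, 0));	/* stays within the first 64 KB */
}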
#define SW fd_routine[use_virtual_dma&1]
#define CSW fd_routine[can_use_virtual_dma & 1]
#define fd_inb(port) readb(port)
#define fd_outb(value, port) writeb(value, port)
#define fd_request_dma() CSW._request_dma(FLOPPY_DMA,"floppy")
#define fd_free_dma() CSW._free_dma(FLOPPY_DMA)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_free_irq() free_irq(FLOPPY_IRQ, NULL)
#define fd_get_dma_residue() SW._get_dma_residue(FLOPPY_DMA)
#define fd_dma_mem_alloc(size) SW._dma_mem_alloc(size)
#define fd_dma_setup(addr, size, mode, io) SW._dma_setup(addr, size, mode, io)
#define FLOPPY_CAN_FALLBACK_ON_NODMA
static int virtual_dma_count=0;
static int virtual_dma_residue=0;
static char *virtual_dma_addr=0;
static int virtual_dma_mode=0;
static int doing_pdma=0;
static void floppy_hardint(int irq, void *dev_id, struct pt_regs * regs)
{
register unsigned char st;
#undef TRACE_FLPY_INT
#ifdef TRACE_FLPY_INT
static int calls=0;
static int bytes=0;
static int dma_wait=0;
#endif
if (!doing_pdma) {
floppy_interrupt(irq, dev_id, regs);
return;
}
#ifdef TRACE_FLPY_INT
if(!calls)
bytes = virtual_dma_count;
#endif
{
register int lcount;
register char *lptr = virtual_dma_addr;
for (lcount = virtual_dma_count; lcount; lcount--) {
st = fd_inb(virtual_dma_port+4) & 0xa0 ;
if (st != 0xa0)
break;
if (virtual_dma_mode) {
fd_outb(*lptr, virtual_dma_port+5);
} else {
*lptr = fd_inb(virtual_dma_port+5);
}
lptr++;
}
virtual_dma_count = lcount;
virtual_dma_addr = lptr;
st = fd_inb(virtual_dma_port+4);
}
#ifdef TRACE_FLPY_INT
calls++;
#endif
if (st == 0x20)
return;
if (!(st & 0x20)) {
virtual_dma_residue += virtual_dma_count;
virtual_dma_count = 0;
#ifdef TRACE_FLPY_INT
printk("count=%x, residue=%x calls=%d bytes=%d dma_wait=%d\n",
virtual_dma_count, virtual_dma_residue, calls, bytes,
dma_wait);
calls = 0;
dma_wait=0;
#endif
doing_pdma = 0;
floppy_interrupt(irq, dev_id, regs);
return;
}
#ifdef TRACE_FLPY_INT
if (!virtual_dma_count)
dma_wait++;
#endif
}
static void fd_disable_dma(void)
{
if(! (can_use_virtual_dma & 1))
disable_dma(FLOPPY_DMA);
doing_pdma = 0;
virtual_dma_residue += virtual_dma_count;
virtual_dma_count=0;
}
static int vdma_request_dma(unsigned int dmanr, const char * device_id)
{
return 0;
}
static void vdma_nop(unsigned int dummy)
{
}
static int vdma_get_dma_residue(unsigned int dummy)
{
return virtual_dma_count + virtual_dma_residue;
}
static int fd_request_irq(void)
{
if(can_use_virtual_dma)
return request_irq(FLOPPY_IRQ, floppy_hardint,SA_INTERRUPT,
"floppy", NULL);
else
return request_irq(FLOPPY_IRQ, floppy_interrupt,
SA_INTERRUPT|SA_SAMPLE_RANDOM,
"floppy", NULL);
}
static unsigned long dma_mem_alloc(unsigned long size)
{
return __get_dma_pages(GFP_KERNEL, get_order(size));
}
static unsigned long vdma_mem_alloc(unsigned long size)
{
return (unsigned long) vmalloc(size);
}
#define nodma_mem_alloc(size) vdma_mem_alloc(size)
static void _fd_dma_mem_free(unsigned long addr, unsigned long size)
{
if((unsigned int) addr >= (unsigned int) high_memory)
return vfree((void *)addr);
else
free_pages(addr, get_order(size));
}
#define fd_dma_mem_free(addr, size) _fd_dma_mem_free(addr, size)
static void _fd_chose_dma_mode(char *addr, unsigned long size)
{
if(can_use_virtual_dma == 2) {
if((unsigned int) addr >= (unsigned int) high_memory ||
virt_to_bus(addr) >= 0x1000000 ||
_CROSS_64KB(addr, size, 0))
use_virtual_dma = 1;
else
use_virtual_dma = 0;
} else {
use_virtual_dma = can_use_virtual_dma & 1;
}
}
#define fd_chose_dma_mode(addr, size) _fd_chose_dma_mode(addr, size)
static int vdma_dma_setup(char *addr, unsigned long size, int mode, int io)
{
doing_pdma = 1;
virtual_dma_port = io;
virtual_dma_mode = (mode == DMA_MODE_WRITE);
virtual_dma_addr = addr;
virtual_dma_count = size;
virtual_dma_residue = 0;
return 0;
}
static int hard_dma_setup(char *addr, unsigned long size, int mode, int io)
{
#ifdef FLOPPY_SANITY_CHECK
if (CROSS_64KB(addr, size)) {
printk("DMA crossing 64-K boundary %p-%p\n", addr, addr+size);
return -1;
}
#endif
/* actual, physical DMA */
doing_pdma = 0;
clear_dma_ff(FLOPPY_DMA);
set_dma_mode(FLOPPY_DMA,mode);
set_dma_addr(FLOPPY_DMA,virt_to_bus(addr));
set_dma_count(FLOPPY_DMA,size);
enable_dma(FLOPPY_DMA);
return 0;
}
struct fd_routine_l {
int (*_request_dma)(unsigned int dmanr, const char * device_id);
void (*_free_dma)(unsigned int dmanr);
int (*_get_dma_residue)(unsigned int dummy);
unsigned long (*_dma_mem_alloc) (unsigned long size);
int (*_dma_setup)(char *addr, unsigned long size, int mode, int io);
} fd_routine[] = {
{
request_dma,
free_dma,
get_dma_residue,
dma_mem_alloc,
hard_dma_setup
},
{
vdma_request_dma,
vdma_nop,
vdma_get_dma_residue,
vdma_mem_alloc,
vdma_dma_setup
}
};
static int FDC1 = 0x3f0; /* Lies. Floppy controller is memory mapped, not io mapped */
static int FDC2 = -1;
#define FLOPPY0_TYPE 0
#define FLOPPY1_TYPE 0
#define N_FDC 1
#define N_DRIVE 8
#define FLOPPY_MOTOR_MASK 0xf0
#define AUTO_DMA
#define EXTRA_FLOPPY_PARAMS
#endif /* __ASM_PARISC_FLOPPY_H */
/*
* Architecture specific parts of HP's STI (framebuffer) driver
* structures are HP-UX compatible for XFree86 usage
*/
#ifndef __ASM_PARISC_GRFIOCTL_H
#define __ASM_PARISC_GRFIOCTL_H
/* upper 32 bits of graphics id (HP/UX identifier) */
#define GRFGATOR 8
#define S9000_ID_S300 9
#define GRFBOBCAT 9
#define GRFCATSEYE 9
#define S9000_ID_98720 10
#define GRFRBOX 10
#define S9000_ID_98550 11
#define GRFFIREEYE 11
#define S9000_ID_A1096A 12
#define GRFHYPERION 12
#define S9000_ID_FRI 13
#define S9000_ID_98730 14
#define GRFDAVINCI 14
#define S9000_ID_98705 0x26C08070 /* Tigershark */
#define S9000_ID_98736 0x26D148AB
#define S9000_ID_A1659A 0x26D1482A /* CRX 8 plane color (=ELK) */
#define S9000_ID_ELK S9000_ID_A1659A
#define S9000_ID_A1439A 0x26D148EE /* CRX24 = CRX+ (24-plane color) */
#define S9000_ID_A1924A 0x26D1488C /* GRX gray-scale */
#define S9000_ID_ELM S9000_ID_A1924A
#define S9000_ID_98765 0x27480DEF
#define S9000_ID_ELK_768 0x27482101
#define S9000_ID_STINGER 0x27A4A402
#define S9000_ID_TIMBER 0x27F12392 /* Bushmaster (710) Graphics */
#define S9000_ID_TOMCAT 0x27FCCB6D /* dual-headed ELK (Dual CRX) */
#define S9000_ID_ARTIST 0x2B4DED6D /* Artist (Gecko/712 & 715) onboard Graphics */
#define S9000_ID_HCRX 0x2BCB015A /* Hyperdrive/Hyperbowl (A4071A) Graphics */
#define CRX24_OVERLAY_PLANES 0x920825AA /* Overlay planes on CRX24 */
#define CRT_ID_ELK_1024 S9000_ID_ELK_768 /* Elk 1024x768 CRX */
#define CRT_ID_ELK_1280 S9000_ID_A1659A /* Elk 1280x1024 CRX */
#define CRT_ID_ELK_1024DB 0x27849CA5 /* Elk 1024x768 double buffer */
#define CRT_ID_ELK_GS S9000_ID_A1924A /* Elk 1280x1024 GreyScale */
#define CRT_ID_CRX24 S9000_ID_A1439A /* Piranha */
#define CRT_ID_VISUALIZE_EG 0x2D08C0A7 /* Graffiti (built-in B132+/B160L) */
#define CRT_ID_THUNDER 0x2F23E5FC /* Thunder 1 VISUALIZE 48*/
#define CRT_ID_THUNDER2 0x2F8D570E /* Thunder 2 VISUALIZE 48 XP*/
#define CRT_ID_HCRX S9000_ID_HCRX /* Hyperdrive HCRX */
#define CRT_ID_CRX48Z S9000_ID_STINGER /* Stinger */
#define CRT_ID_DUAL_CRX S9000_ID_TOMCAT /* Tomcat */
#define CRT_ID_PVRX S9000_ID_98705 /* Tigershark */
#define CRT_ID_TIMBER S9000_ID_TIMBER /* Timber (710 builtin) */
#define CRT_ID_TVRX S9000_ID_98765 /* TVRX (gto/falcon) */
#define CRT_ID_ARTIST S9000_ID_ARTIST /* Artist */
#define CRT_ID_SUMMIT 0x2FC1066B /* Summit FX2, FX4, FX6 ... */
/* structure for ioctl(GCDESCRIBE) */
#define gaddr_t unsigned long /* FIXME: PA2.0 (64bit) portable ? */
struct grf_fbinfo {
unsigned int id; /* upper 32 bits of graphics id */
unsigned int mapsize; /* mapped size of framebuffer */
unsigned int dwidth, dlength;/* x and y sizes */
unsigned int width, length; /* total x and total y size */
unsigned int xlen; /* x pitch size */
unsigned int bpp, bppu; /* bits per pixel and used bpp */
unsigned int npl, nplbytes; /* # of planes and bytes per plane */
char name[32]; /* name of the device (from ROM) */
unsigned int attr; /* attributes */
gaddr_t fbbase, regbase;/* framebuffer and register base addr */
gaddr_t regions[6]; /* region bases */
};
#define GCID _IOR('G', 0, int)
#define GCON _IO('G', 1)
#define GCOFF _IO('G', 2)
#define GCAON _IO('G', 3)
#define GCAOFF _IO('G', 4)
#define GCMAP _IOWR('G', 5, int)
#define GCUNMAP _IOWR('G', 6, int)
#define GCMAP_HPUX _IO('G', 5)
#define GCUNMAP_HPUX _IO('G', 6)
#define GCLOCK _IO('G', 7)
#define GCUNLOCK _IO('G', 8)
#define GCLOCK_MINIMUM _IO('G', 9)
#define GCUNLOCK_MINIMUM _IO('G', 10)
#define GCSTATIC_CMAP _IO('G', 11)
#define GCVARIABLE_CMAP _IO('G', 12)
#define GCTERM _IOWR('G',20,int) /* multi-headed Tomcat */
#define GCDESCRIBE _IOR('G', 21, struct grf_fbinfo)
#define GCFASTLOCK _IO('G', 26)
#endif /* __ASM_PARISC_GRFIOCTL_H */
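/* Illustrative userspace sketch (an assumption, not part of this header):
 * querying the framebuffer description with GCDESCRIBE. The device path
 * is a placeholder. */
#if 0	/* example only -- userspace code */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	struct grf_fbinfo info;
	int fd = open("/dev/graphics", O_RDWR);	/* path is a placeholder */

	if (fd < 0 || ioctl(fd, GCDESCRIBE, &info) < 0)
		return 1;
	printf("%.32s: %ux%u visible, %u bpp\n",
	       info.name, info.dwidth, info.dlength, info.bpp);
	return 0;
}
#endif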
......@@ -3,49 +3,17 @@
#ifdef __KERNEL__
#include <linux/types.h>
#include <asm/hardware.h> /* for struct hp_device */
#include <asm/io.h> /* temporary for __raw_{read,write} */
/*
* The convention used for inb/outb etc. is that names starting with
* two underscores are the inline versions, names starting with a
* single underscore are proper functions, and names starting with a
* letter are macros that map in some way to inline or proper function
* versions. Not all that pretty, but before you change it, be sure
* to convince yourself that it won't break anything (in particular
 * module support).
 */
/* Please, call ioremap and use {read,write}[bwl] instead. These functions
* are not very fast.
*/
extern u8 _gsc_readb(void *);
extern u16 _gsc_readw(void *);
extern u32 _gsc_readl(void *);
extern u64 _gsc_readq(void *);
extern void _gsc_writeb(u8, void *);
extern void _gsc_writew(u16,void *);
extern void _gsc_writel(u32,void *);
extern void _gsc_writeq(u64,void *);
#define gsc_readb(a) _gsc_readb((void *)(a))
#define gsc_readw(a) _gsc_readw((void *)(a))
#define gsc_readl(a) _gsc_readl((void *)(a))
#define gsc_readq(a) _gsc_readq((void *)(a))
#define gsc_writeb(v,a) _gsc_writeb((v),(void *)(a))
#define gsc_writew(v,a) _gsc_writew((v),(void *)(a))
#define gsc_writel(v,a) _gsc_writel((v),(void *)(a))
#define gsc_writeq(v,a) _gsc_writeq((v),(void *)(a))
struct gsc_dev {
struct gsc_bus *bus; /* bus this device is on */
struct gsc_dev *next; /* chain of all devices */
struct gsc_dev *next_bus; /* chain of all devices on a bus */
struct gsc_dev *next_submod; /* chain of all devices on a module */
unsigned irq; /* irq generated by this device */
void *hpa; /* hard physical address */
u16 hversion;
u8 spa; /* SPA requirements */
u8 type;
u32 sversion;
};
#define gsc_readb(x) __raw_readb((unsigned long)x)
#define gsc_readw(x) __raw_readw((unsigned long)x)
#define gsc_readl(x) __raw_readl((unsigned long)x)
#define gsc_writeb(x, y) __raw_writeb(x, (unsigned long)y)
#define gsc_writew(x, y) __raw_writew(x, (unsigned long)y)
#define gsc_writel(x, y) __raw_writel(x, (unsigned long)y)
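/* Minimal sketch (an assumption, not from this header) of the preferred
 * style named in the comment above: ioremap() the region, then use
 * readl()/writel(); the gsc_* wrappers are the legacy equivalent.
 * dev_hpa and the register offset are placeholders. */
#if 0	/* example only */
	void *base = ioremap(dev_hpa, 4096);
	u32 status = readl(base + 0x10);	/* preferred */
	u32 legacy = gsc_readl(dev_hpa + 0x10);	/* legacy path */
#endif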
struct gsc_irq {
unsigned long txn_addr; /* IRQ "target" */
......@@ -59,21 +27,5 @@ struct gsc_irq {
extern int gsc_alloc_irq(struct gsc_irq *dev); /* dev needs an irq */
extern int gsc_claim_irq(struct gsc_irq *dev, int irq); /* dev needs this irq */
struct gsc_bus {
void *hpa; /* HPA of device 0, function 0 of this bus */
};
/*
* There is one gsc_dev structure for each slot-number/function-number
* combination:
*/
struct gsc_dev *gsc_find_device(u16 hversion, struct gsc_dev *from);
extern void probe_serial_gsc(void);
/* returns a virtual irq for device at dev->hpa (works for all LASI/ASP/WAX) */
extern int busdevice_alloc_irq( struct hp_device *dev );
#endif /* __KERNEL__ */
#endif /* LINUX_GSC_H */
/* hardirq.h: 32-bit Sparc hard IRQ support.
/* hardirq.h: PA-RISC hard IRQ support.
*
* Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
* Copyright (C) 1998-99 Anton Blanchard (anton@progsoc.uts.edu.au)
* Copyright (C) 2001 Matthew Wilcox <matthew@wil.cx>
*
* The locking is really quite interesting. There's a cpu-local
* count of how many interrupts are being handled, and a global
* lock. An interrupt can only be serviced if the global lock
 * is free. You can't be sure no more interrupts are being
 * serviced until you've acquired the lock and then checked that
 * all the per-cpu interrupt counts are zero. It's a specialised
 * br_lock, and that's exactly how Sparc does it. We don't do it
 * that way because it would mean more locking for us; this way
 * the interrupt path stays lock-free.
*/
#ifndef __PARISC_HARDIRQ_H
#define __PARISC_HARDIRQ_H
#ifndef _PARISC_HARDIRQ_H
#define _PARISC_HARDIRQ_H
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/cache.h>
typedef struct {
unsigned int __softirq_active;
unsigned int __softirq_mask;
unsigned int __local_irq_count;
unsigned int __local_bh_count;
unsigned long __softirq_pending; /* set_bit is used on this */
unsigned int __syscall_count;
struct task_struct * __ksoftirqd_task;
unsigned long idle_timestamp;
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
/*
* Are we in an interrupt context? Either doing bottom half
* or hardware interrupt processing?
* We put the hardirq and softirq counter into the preemption counter. The bitmask has the
* following meaning:
*
* - bits 0-7 are the preemption count (max preemption depth: 256)
* - bits 8-15 are the softirq count (max # of softirqs: 256)
* - bits 16-31 are the hardirq count (max # of hardirqs: 65536)
*
* - (bit 63 is the PREEMPT_ACTIVE flag---not currently implemented.)
*
* PREEMPT_MASK: 0x000000ff
* SOFTIRQ_MASK: 0x0000ff00
* HARDIRQ_MASK: 0xffff0000
*/
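/* Worked example (illustrative, not part of the original header): a
 * preempt_count() of 0x00010001 decodes with the masks below as one
 * hardirq in progress and preemption disabled once:
 *   (0x00010001 & HARDIRQ_MASK) >> HARDIRQ_SHIFT == 1
 *   (0x00010001 & SOFTIRQ_MASK) >> SOFTIRQ_SHIFT == 0
 *   (0x00010001 & PREEMPT_MASK) >> PREEMPT_SHIFT == 1
 */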
#define in_interrupt() ({ int __cpu = smp_processor_id(); \
(local_irq_count(__cpu) + local_bh_count(__cpu) != 0); })
#define in_irq() ({ int __cpu = smp_processor_id(); \
(local_irq_count(__cpu) != 0); })
#define PREEMPT_BITS 8
#define SOFTIRQ_BITS 8
#define HARDIRQ_BITS 16
#ifndef CONFIG_SMP
#define PREEMPT_SHIFT 0
#define SOFTIRQ_SHIFT (PREEMPT_SHIFT + PREEMPT_BITS)
#define HARDIRQ_SHIFT (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
#define hardirq_trylock(cpu) (local_irq_count(cpu) == 0)
#define hardirq_endlock(cpu) do { } while (0)
#define __MASK(x) ((1UL << (x))-1)
#define irq_enter(cpu, irq) (local_irq_count(cpu)++)
#define irq_exit(cpu, irq) (local_irq_count(cpu)--)
#define PREEMPT_MASK (__MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
#define HARDIRQ_MASK (__MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
#define SOFTIRQ_MASK (__MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
#define synchronize_irq() barrier()
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#define softirq_count() (preempt_count() & SOFTIRQ_MASK)
#define irq_count() (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK))
#else
#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
/*
* The hardirq mask has to be large enough to have space for potentially all IRQ sources
* in the system nesting on a single CPU:
*/
#if (1 << HARDIRQ_BITS) < NR_IRQS
# error HARDIRQ_BITS is too low!
#endif
/*
* Are we doing bottom half or hardware interrupt processing?
* Are we in a softirq context?
* Interrupt context?
*/
#define in_irq() (hardirq_count())
#define in_softirq() (softirq_count())
#define in_interrupt() (irq_count())
#include <asm/atomic.h>
#include <linux/spinlock.h>
#include <asm/system.h>
#include <asm/smp.h>
extern unsigned char global_irq_holder;
extern spinlock_t global_irq_lock;
extern atomic_t global_irq_count;
static inline void release_irqlock(int cpu)
{
/* if we didn't own the irq lock, just ignore.. */
if (global_irq_holder == (unsigned char) cpu) {
global_irq_holder = NO_PROC_ID;
spin_unlock(&global_irq_lock);
}
}
static inline void irq_enter(int cpu)
{
++local_irq_count(cpu);
atomic_inc(&global_irq_count);
}
static inline void irq_exit(int cpu)
{
atomic_dec(&global_irq_count);
--local_irq_count(cpu);
}
static inline int hardirq_trylock(int cpu)
{
return (! atomic_read(&global_irq_count) &&
! spin_is_locked (&global_irq_lock));
}
#define hardirq_endlock(cpu) do { } while (0)
extern void synchronize_irq(void);
#define hardirq_trylock() (!in_interrupt())
#define hardirq_endlock() do { } while (0)
#define irq_enter() (preempt_count() += HARDIRQ_OFFSET)
#if CONFIG_PREEMPT
# error CONFIG_PREEMPT is currently not supported.
# define in_atomic() BUG()
# define IRQ_EXIT_OFFSET (HARDIRQ_OFFSET-1)
#else
# define in_atomic() (preempt_count() != 0)
# define IRQ_EXIT_OFFSET HARDIRQ_OFFSET
#endif
#define irq_exit() \
do { \
preempt_count() -= IRQ_EXIT_OFFSET; \
if (!in_interrupt() && softirq_pending(smp_processor_id())) \
do_softirq(); \
preempt_enable_no_resched(); \
} while (0)
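/* Illustrative usage sketch (an assumption, not from this header): how an
 * interrupt entry path would drive the counters above. */
#if 0	/* example only */
static void example_interrupt_path(struct pt_regs *regs)
{
	irq_enter();	/* preempt_count() += HARDIRQ_OFFSET */
	/* ... dispatch to the device handler; in_irq() is now true ... */
	irq_exit();	/* drops the count and may run do_softirq() */
}
#endif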
#ifdef CONFIG_SMP
extern void synchronize_irq (unsigned int irq);
#else
# define synchronize_irq(irq) barrier()
#endif /* CONFIG_SMP */
#endif /* __PARISC_HARDIRQ_H */
#endif /* _PARISC_HARDIRQ_H */
#ifndef _PARISC_HP_MACHINES_H_
#define _PARISC_HP_MACHINES_H_
#ifndef _PARISC_HARDWARE_H
#define _PARISC_HARDWARE_H
#include <asm/pdc.h>
struct parisc_device_id {
unsigned char hw_type; /* 5 bits used */
unsigned char hversion_rev; /* 4 bits */
unsigned short hversion; /* 12 bits */
unsigned int sversion; /* 20 bits */
};
#define HWTYPE_ANY_ID 0xff
#define HVERSION_REV_ANY_ID 0xff
#define HVERSION_ANY_ID 0xffff
#define SVERSION_ANY_ID 0xffffffffU
struct hp_hardware {
unsigned short hw_type:5; /* HPHW_xxx */
unsigned short hversion;
unsigned long sversion:28;
unsigned short opt;
char *name;
unsigned short hw_type:5; /* HPHW_xxx */
unsigned short hversion;
unsigned long sversion:28;
unsigned short opt;
const char name[80]; /* The hardware description */
};
struct hp_device {
unsigned short hw_type:5; /* HPHW_xxx */
unsigned short hversion; /* HP-UX uses hv_model:12 */
unsigned int sversion; /* HP-UX uses sv_model:20 sv_opt:8 */
unsigned short opt;
unsigned int hversion_rev;
unsigned int sversion_rev;
struct hp_hardware * reference; /* pointer to the matching hp_hardware entry */
unsigned int managed; /* set if the device has a driver bound to it */
void * hpa;
struct parisc_device {
unsigned long hpa; /* Hard Physical Address */
struct parisc_device_id id;
struct parisc_device *parent;
struct parisc_device *sibling;
struct parisc_device *child;
struct parisc_driver *driver; /* Driver for this device */
void *sysdata; /* Driver instance private data */
char name[80]; /* The hardware description */
int irq;
char hw_path; /* The module number on this bus */
unsigned int num_addrs; /* some devices have additional address ranges. */
unsigned long *addr; /* which will be stored here */
#ifdef __LP64__
/* parms for pdc_pat_cell_module() call */
unsigned long pcell_loc; /* Physical Cell location */
unsigned long mod_index; /* PAT specific - Misc Module info */
unsigned long pcell_loc; /* Physical Cell location */
unsigned long mod_index; /* PAT specific - Misc Module info */
/* generic info returned from pdc_pat_cell_module() */
unsigned long mod_info; /* PAT specific - Misc Module info */
unsigned long pmod_loc; /* physical Module location */
unsigned long mod_path; /* Module HW path */
unsigned long mod_info; /* PAT specific - Misc Module info */
unsigned long pmod_loc; /* physical Module location */
#endif
};
......@@ -43,33 +59,56 @@ enum cpu_type {
pcxu = 6, /* pa8000 pa 2.0 */
pcxu_ = 7, /* pa8200 (u+) pa 2.0 */
pcxw = 8, /* pa8500 pa 2.0 */
pcxw_ = 9 /* pa8600 (w+) pa 2.0 */
pcxw_ = 9, /* pa8600 (w+) pa 2.0 */
pcxw2 = 10 /* pa8700 pa 2.0 */
};
extern char *cpu_name_version[][2]; /* mapping from enum cpu_type to strings */
struct pa_iodc_driver {
unsigned short hw_type:5; /* HPHW_xxx */
unsigned short hversion;
unsigned short hversion_rev;
unsigned long sversion:28;
unsigned short sversion_rev;
unsigned short opt;
unsigned int check; /* Components that are significant */
struct parisc_driver {
struct parisc_driver *next;
char *name;
char *version;
int (* callback)(struct hp_device *d, struct pa_iodc_driver *dri);
const struct parisc_device_id *id_table;
int (*probe) (struct parisc_device *dev); /* New device discovered */
};
#define DRIVER_CHECK_HWTYPE 1
#define DRIVER_CHECK_HVERSION 2
#define DRIVER_CHECK_SVERSION 4
#define DRIVER_CHECK_OPT 8
/* The following two are useless right now */
#define DRIVER_CHECK_HVERSION_REV 16
#define DRIVER_CHECK_SVERSION_REV 32
#define DRIVER_CHECK_EVERYTHING 63
struct io_module {
volatile uint32_t nothing; /* reg 0 */
volatile uint32_t io_eim;
volatile uint32_t io_dc_adata;
volatile uint32_t io_ii_cdata;
volatile uint32_t io_dma_link; /* reg 4 */
volatile uint32_t io_dma_command;
volatile uint32_t io_dma_address;
volatile uint32_t io_dma_count;
volatile uint32_t io_flex; /* reg 8 */
volatile uint32_t io_spa_address;
volatile uint32_t reserved1[2];
volatile uint32_t io_command; /* reg 12 */
volatile uint32_t io_status;
volatile uint32_t io_control;
volatile uint32_t io_data;
volatile uint32_t reserved2; /* reg 16 */
volatile uint32_t chain_addr;
volatile uint32_t sub_mask_clr;
volatile uint32_t reserved3[13];
volatile uint32_t undefined[480];
volatile uint32_t unpriv[512];
};
struct bc_module {
volatile uint32_t unused1[12];
volatile uint32_t io_command;
volatile uint32_t io_status;
volatile uint32_t io_control;
volatile uint32_t unused2[1];
volatile uint32_t io_err_resp;
volatile uint32_t io_err_info;
volatile uint32_t io_err_req;
volatile uint32_t unused3[11];
volatile uint32_t io_io_low;
volatile uint32_t io_io_high;
};
#define HPHW_NPROC 0
#define HPHW_MEMORY 1
......@@ -88,16 +127,33 @@ struct pa_iodc_driver {
#define HPHW_FABRIC 14
#define HPHW_FAULTY 31
extern struct hp_hardware hp_hardware_list[];
char *parisc_getHWtype( unsigned short hw_type );
/* hardware.c: */
extern const char *parisc_hardware_description(struct parisc_device_id *id);
extern enum cpu_type parisc_get_cpu_type(unsigned long hversion);
/* Attention: first hversion, then sversion...! */
char *parisc_getHWdescription( unsigned short hw_type,
unsigned long hversion, /* has to be long! */
unsigned long sversion );
struct pci_dev;
enum cpu_type parisc_get_cpu_type( unsigned long hversion );
/* drivers.c: */
extern struct parisc_device *alloc_pa_dev(unsigned long hpa,
struct hardware_path *path);
extern int register_parisc_device(struct parisc_device *dev);
extern int register_parisc_driver(struct parisc_driver *driver);
extern int count_parisc_driver(struct parisc_driver *driver);
extern int unregister_parisc_driver(struct parisc_driver *driver);
extern void walk_central_bus(void);
extern void fixup_child_irqs(struct parisc_device *parent, int irqbase,
int (*choose)(struct parisc_device *parent));
extern void print_subdevices(struct parisc_device *dev);
extern const struct parisc_device *find_pa_parent_type(const struct parisc_device *, int);
extern void print_parisc_devices(void);
extern char *print_pa_hwpath(struct parisc_device *dev, char *path);
extern char *print_pci_hwpath(struct pci_dev *dev, char *path);
extern void get_pci_node_path(struct pci_dev *dev, struct hardware_path *path);
extern int register_driver(struct pa_iodc_driver *driver);
#endif
/* inventory.c: */
extern void do_memory_inventory(void);
extern void do_device_inventory(void);
#endif /* _PARISC_HARDWARE_H */
......@@ -7,7 +7,7 @@
* (c) 1999 Matthew Wilcox
*/
extern unsigned long hil_base; /* declared in drivers/gsc/hil.c */
extern unsigned long hil_base; /* declared in drivers/parisc/hil.c */
extern unsigned int hil_irq;
#define HILBASE hil_base /* 0xf0821000 (old) or 0xf0201000 (new) */
......
......@@ -5,29 +5,29 @@
*/
/*
* This file contains the i386 architecture specific IDE code.
* This file contains the PARISC architecture specific IDE code.
*/
#ifndef __ASMi386_IDE_H
#define __ASMi386_IDE_H
#ifndef __ASM_PARISC_IDE_H
#define __ASM_PARISC_IDE_H
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/superio.h>
#ifndef MAX_HWIFS
#define MAX_HWIFS 10
#define MAX_HWIFS 2
#endif
static __inline__ int ide_default_irq(ide_ioreg_t base)
{
switch (base) {
case 0x1f0: return 14;
case 0x170: return 15;
case 0x1e8: return 11;
case 0x168: return 10;
case 0x1e0: return 8;
case 0x160: return 12;
#ifdef CONFIG_SUPERIO
case 0x1f0:
case 0x170:
return superio_get_ide_irq();
#endif /* CONFIG_SUPERIO */
default:
return 0;
}
......@@ -36,12 +36,10 @@ static __inline__ int ide_default_irq(ide_ioreg_t base)
static __inline__ ide_ioreg_t ide_default_io_base(int index)
{
switch (index) {
case 0: return 0x1f0;
case 1: return 0x170;
case 2: return 0x1e8;
case 3: return 0x168;
case 4: return 0x1e0;
case 5: return 0x160;
#ifdef CONFIG_SUPERIO
case 0: return (superio_get_ide_irq() ? 0x1f0 : 0);
case 1: return (superio_get_ide_irq() ? 0x170 : 0);
#endif /* CONFIG_SUPERIO */
default:
return 0;
}
......@@ -63,6 +61,7 @@ static __inline__ void ide_init_hwif_ports(hw_regs_t *hw, ide_ioreg_t data_port,
}
if (irq != NULL)
*irq = 0;
hw->io_ports[IDE_IRQ_OFFSET] = 0;
}
static __inline__ void ide_init_default_hwifs(void)
......@@ -79,6 +78,19 @@ static __inline__ void ide_init_default_hwifs(void)
#endif
}
#define ide_request_irq(irq,hand,flg,dev,id) request_irq((irq),(hand),(flg),(dev),(id))
#define ide_free_irq(irq,dev_id) free_irq((irq), (dev_id))
#define ide_check_region(from,extent) check_region((from), (extent))
#define ide_request_region(from,extent,name) request_region((from), (extent), (name))
#define ide_release_region(from,extent) release_region((from), (extent))
/*
* The following are not needed for the non-m68k ports
*/
#define ide_ack_intr(hwif) (1)
#define ide_release_lock(lock) do {} while (0)
#define ide_get_lock(lock, hdlr, data) do {} while (0)
#endif /* __KERNEL__ */
#endif /* __ASMi386_IDE_H */
#endif /* __ASM_PARISC_IDE_H */
#ifndef _ASM_IO_H
#define _ASM_IO_H
/* USE_HPPA_IOREMAP IS THE MAGIC FLAG TO ENABLE OR DISABLE REAL IOREMAP() FUNCTIONALITY */
/* FOR 712 or 715 MACHINES THIS SHOULD BE ENABLED,
NEWER MACHINES STILL HAVE SOME ISSUES IN THE SCSI AND/OR NETWORK DRIVERS AND
BECAUSE OF THAT I WILL LEAVE IT DISABLED FOR NOW <deller@gmx.de> */
/* WHEN THOSE ISSUES ARE SOLVED, USE_HPPA_IOREMAP WILL GO AWAY */
#define USE_HPPA_IOREMAP 0
#include <linux/config.h>
#include <linux/types.h>
#include <asm/gsc.h>
#include <asm/pgtable.h>
#define virt_to_phys(a) ((unsigned long)__pa(a))
#define phys_to_virt(a) __va(a)
#define virt_to_bus virt_to_phys
#define bus_to_virt phys_to_virt
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
/* Memory mapped IO */
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
extern inline void * ioremap(unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, 0);
}
/*
* This one maps high address device memory and turns off caching for that area.
 * It's useful if some control registers are in such an area and write combining
* or read caching is not desirable:
*/
extern inline void * ioremap_nocache (unsigned long offset, unsigned long size)
{
return __ioremap(offset, size, _PAGE_NO_CACHE /* _PAGE_PCD */);
}
extern void iounmap(void *addr);
/*
* __raw_ variants have no defined meaning. on hppa, it means `i was
* too lazy to ioremap first'. kind of like isa_, except that there's
* no additional base address to add on.
*/
extern __inline__ unsigned char __raw_readb(unsigned long addr)
{
long flags;
unsigned char ret;
__asm__ __volatile__(
" rsm 2,%0\n"
" ldbx 0(%2),%1\n"
" mtsm %0\n"
: "=&r" (flags), "=r" (ret) : "r" (addr) );
return ret;
}
extern __inline__ unsigned short __raw_readw(unsigned long addr)
{
long flags;
unsigned short ret;
__asm__ __volatile__(
" rsm 2,%0\n"
" ldhx 0(%2),%1\n"
" mtsm %0\n"
: "=&r" (flags), "=r" (ret) : "r" (addr) );
return ret;
}
extern __inline__ unsigned int __raw_readl(unsigned long addr)
{
u32 ret;
__asm__ __volatile__(
" ldwax 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
return ret;
}
extern __inline__ unsigned long long __raw_readq(unsigned long addr)
{
unsigned long long ret;
#ifdef __LP64__
__asm__ __volatile__(
" ldda 0(%1),%0\n"
: "=r" (ret) : "r" (addr) );
#else
/* two reads may have side effects.. */
ret = ((u64) __raw_readl(addr)) << 32;
ret |= __raw_readl(addr+4);
#endif
return ret;
}
extern __inline__ void __raw_writeb(unsigned char val, unsigned long addr)
{
long flags;
__asm__ __volatile__(
" rsm 2,%0\n"
" stbs %1,0(%2)\n"
" mtsm %0\n"
: "=&r" (flags) : "r" (val), "r" (addr) );
}
extern __inline__ void __raw_writew(unsigned short val, unsigned long addr)
{
long flags;
__asm__ __volatile__(
" rsm 2,%0\n"
" sths %1,0(%2)\n"
" mtsm %0\n"
: "=&r" (flags) : "r" (val), "r" (addr) );
}
extern __inline__ void __raw_writel(unsigned int val, unsigned long addr)
{
__asm__ __volatile__(
" stwas %0,0(%1)\n"
: : "r" (val), "r" (addr) );
}
extern __inline__ void __raw_writeq(unsigned long long val, unsigned long addr)
{
#ifdef __LP64__
__asm__ __volatile__(
" stda %0,0(%1)\n"
: : "r" (val), "r" (addr) );
#else
/* two writes may have side effects.. */
__raw_writel(val >> 32, addr);
__raw_writel(val, addr+4);
#endif
}
#if USE_HPPA_IOREMAP
#define readb(addr) (*(volatile unsigned char *) (addr))
#define readw(addr) (*(volatile unsigned short *) (addr))
#define readl(addr) (*(volatile unsigned int *) (addr))
#define readq(addr) (*(volatile u64 *) (addr))
#define writeb(b,addr) (*(volatile unsigned char *) (addr) = (b))
#define writew(b,addr) (*(volatile unsigned short *) (addr) = (b))
#define writel(b,addr) (*(volatile unsigned int *) (addr) = (b))
#define writeq(b,addr) (*(volatile u64 *) (addr) = (b))
#else /* !USE_HPPA_IOREMAP */
#define readb(addr) __raw_readb((unsigned long)(addr))
#define readw(addr) le16_to_cpu(__raw_readw((unsigned long)(addr)))
#define readl(addr) le32_to_cpu(__raw_readl((unsigned long)(addr)))
#define readq(addr) le64_to_cpu(__raw_readq((unsigned long)(addr)))
#define writeb(b,addr) __raw_writeb(b,(unsigned long)(addr))
#define writew(b,addr) __raw_writew(cpu_to_le16(b),(unsigned long)(addr))
#define writel(b,addr) __raw_writel(cpu_to_le32(b),(unsigned long)(addr))
#define writeq(b,addr) __raw_writeq(cpu_to_le64(b),(unsigned long)(addr))
#endif /* !USE_HPPA_IOREMAP */
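/* Illustrative driver-side sketch (an assumption, not from this header):
 * readl() byteswaps from little-endian bus order on big-endian parisc,
 * while __raw_readl() returns the raw bus value. The address and offset
 * are placeholders. */
#if 0	/* example only */
	void *regs = ioremap(0xf4000000, 0x100);
	u32 swapped = readl(regs + 0x40);	/* LE device register, swapped */
	u32 raw = __raw_readl((unsigned long)regs + 0x40);	/* unswapped */
#endif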
extern void memcpy_fromio(void *dest, unsigned long src, int count);
extern void memcpy_toio(unsigned long dest, const void *src, int count);
extern void memset_io(unsigned long dest, char fill, int count);
/* Support old drivers which don't ioremap.
* NB this interface is scheduled to disappear in 2.5
*/
#define EISA_BASE 0xfffffffffc000000UL
#define isa_readb(a) readb(EISA_BASE | (a))
#define isa_readw(a) readw(EISA_BASE | (a))
#define isa_readl(a) readl(EISA_BASE | (a))
#define isa_writeb(b,a) writeb((b), EISA_BASE | (a))
#define isa_writew(b,a) writew((b), EISA_BASE | (a))
#define isa_writel(b,a) writel((b), EISA_BASE | (a))
#define isa_memset_io(a,b,c) memset_io(EISA_BASE | (a), (b), (c))
#define isa_memcpy_fromio(a,b,c) memcpy_fromio((a), EISA_BASE | (b), (c))
#define isa_memcpy_toio(a,b,c) memcpy_toio(EISA_BASE | (a), (b), (c))
/*
* XXX - We don't have csum_partial_copy_fromio() yet, so we cheat here and
* just copy it. The net code will then do the checksum later. Presently
* only used by some shared memory 8390 Ethernet cards anyway.
*/
#define eth_io_copy_and_sum(skb,src,len,unused) \
memcpy_fromio((skb)->data,(src),(len))
#define isa_eth_io_copy_and_sum(skb,src,len,unused) \
isa_memcpy_fromio((skb)->data,(src),(len))
/* Port-space IO */
#define inb_p inb
#define inw_p inw
#define inl_p inl
......@@ -17,42 +202,66 @@
#define outw_p outw
#define outl_p outl
#define readb gsc_readb
#define readw gsc_readw
#define readl gsc_readl
#define writeb gsc_writeb
#define writew gsc_writew
#define writel gsc_writel
extern unsigned char eisa_in8(unsigned short port);
extern unsigned short eisa_in16(unsigned short port);
extern unsigned int eisa_in32(unsigned short port);
extern void eisa_out8(unsigned char data, unsigned short port);
extern void eisa_out16(unsigned short data, unsigned short port);
extern void eisa_out32(unsigned int data, unsigned short port);
#if defined(CONFIG_PCI)
extern unsigned char inb(int addr);
extern unsigned short inw(int addr);
extern unsigned int inl(int addr);
#if defined(CONFIG_PCI) || defined(CONFIG_ISA)
/*
* So we get clear link errors
*/
extern u8 inb(unsigned long addr);
extern u16 inw(unsigned long addr);
extern u32 inl(unsigned long addr);
extern void outb(unsigned char b, int addr);
extern void outw(unsigned short b, int addr);
extern void outl(unsigned int b, int addr);
#elif defined(CONFIG_EISA)
#define inb eisa_in8
#define inw eisa_in16
#define inl eisa_in32
#define outb eisa_out8
#define outw eisa_out16
#define outl eisa_out32
#else
static inline char inb(unsigned long addr)
{
BUG();
return -1;
}
extern void outb(unsigned char b, unsigned long addr);
extern void outw(unsigned short b, unsigned long addr);
extern void outl(u32 b, unsigned long addr);
static inline short inw(unsigned long addr)
{
BUG();
return -1;
}
static inline void memcpy_toio(void *dest, void *src, int count)
{
	char *d = dest;
	char *s = src;

	while (count--)
		writeb(*s++, d++);
}

static inline int inl(unsigned long addr)
{
	BUG();
	return -1;
}
#define outb(x, y) BUG()
#define outw(x, y) BUG()
#define outl(x, y) BUG()
#endif
/*
* String versions of in/out ops:
*/
extern void insb (unsigned long port, void *dst, unsigned long count);
extern void insw (unsigned long port, void *dst, unsigned long count);
extern void insl (unsigned long port, void *dst, unsigned long count);
extern void outsb (unsigned long port, const void *src, unsigned long count);
extern void outsw (unsigned long port, const void *src, unsigned long count);
extern void outsl (unsigned long port, const void *src, unsigned long count);
/* IO Port space is : BBiiii where BB is HBA number. */
#define IO_SPACE_LIMIT 0x00ffffff
/* Right now we don't support Dino-on-a-card and V class which do PCI MMIO
* through address/data registers. */
#define ioremap(__offset, __size) ((void *)(__offset))
#define iounmap(__addr)
#define dma_cache_inv(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while(0)
#define dma_cache_wback(_start,_size) do { flush_kernel_dcache_range(_start,_size); } while (0)
......
......@@ -69,6 +69,10 @@
#define TIOCSHAYESESP 0x545F /* Set Hayes ESP configuration */
#define FIOQSIZE 0x5460 /* Get exact space used by quota */
#define TIOCSTART 0x5461
#define TIOCSTOP 0x5462
#define TIOCSLTC 0x5462
/* Used for packet mode */
#define TIOCPKT_DATA 0
#define TIOCPKT_FLUSHREAD 1
......
......@@ -15,7 +15,7 @@
** fixup_irq is to initialize PCI IRQ line support and
** virtualize pcidev->irq value. To be called by pci_fixup_bus().
*/
extern void *iosapic_register(void *hpa);
extern void *iosapic_register(unsigned long hpa);
extern int iosapic_fixup_irq(void *obj, struct pci_dev *pcidev);
......
......@@ -2,10 +2,26 @@
#define __PARISC_IPCBUF_H__
/*
* The ipc64_perm structure for PA-RISC is identical to kern_ipc_perm
* as we have always had 32-bit UIDs and GIDs in the kernel.
* The ipc64_perm structure for PA-RISC is almost identical to
* kern_ipc_perm as we have always had 32-bit UIDs and GIDs in the kernel.
* 'seq' has been changed from long to int so that it's the same size
* on 64-bit kernels as on 32-bit ones.
*/
#define ipc64_perm kern_ipc_perm
struct ipc64_perm
{
key_t key;
uid_t uid;
gid_t gid;
uid_t cuid;
gid_t cgid;
unsigned short int __pad1;
mode_t mode;
unsigned short int __pad2;
unsigned short int seq;
unsigned int __pad3;
unsigned long long int __unused1;
unsigned long long int __unused2;
};
#endif /* __PARISC_IPCBUF_H__ */
#ifndef _ASM_IRQ_H
#define _ASM_IRQ_H
#include <linux/string.h>
#include <asm/ptrace.h>
#include <linux/interrupt.h>
#include <asm/types.h>
/*
* linux/include/asm/irq.h
* linux/include/asm-parisc/irq.h
*
* (C) 1992, 1993 Linus Torvalds, (C) 1997 Ingo Molnar,
* Copyright 1999 SuSE GmbH
......@@ -16,33 +8,39 @@
* <tomsoft@informatik.tu-chemnitz.de>
*/
#define CPU_IRQ_REGION 1
#define TIMER_IRQ (IRQ_FROM_REGION(CPU_IRQ_REGION) | 0)
#define IPI_IRQ (IRQ_FROM_REGION(CPU_IRQ_REGION) | 1)
#ifndef _ASM_PARISC_IRQ_H
#define _ASM_PARISC_IRQ_H
/* This should be 31 for PA1.1 binaries and 63 for PA-2.0 wide mode */
#define MAX_CPU_IRQ (BITS_PER_LONG - 1)
#include <asm/ptrace.h>
#include <asm/types.h>
#if 1 /* set to 1 to get the new irq offsets, or ... */
# if BITS_PER_LONG == 32
# define IRQ_REGION_SHIFT 5
# else
# define IRQ_REGION_SHIFT 6
# endif
#else /* 256 irq-entries per region (wastes memory, maybe gains speed? :-))*/
# define IRQ_REGION_SHIFT 8
#endif
#include <linux/string.h>
#include <linux/interrupt.h>
#define CPU_IRQ_REGION 1
#define TIMER_IRQ (IRQ_FROM_REGION(CPU_IRQ_REGION) | 0)
#define IPI_IRQ (IRQ_FROM_REGION(CPU_IRQ_REGION) | 1)
#define IRQ_PER_REGION (1 << IRQ_REGION_SHIFT)
#define NR_IRQ_REGS 8
#define NR_IRQS (NR_IRQ_REGS * IRQ_PER_REGION)
/* This should be 31 for PA1.1 binaries and 63 for PA-2.0 wide mode */
#define MAX_CPU_IRQ (BITS_PER_LONG - 1)
#if BITS_PER_LONG == 32
# define IRQ_REGION_SHIFT 5
#else
# define IRQ_REGION_SHIFT 6
#endif
#define IRQ_PER_REGION (1 << IRQ_REGION_SHIFT)
#define NR_IRQ_REGS 16
#define NR_IRQS (NR_IRQ_REGS * IRQ_PER_REGION)
#define IRQ_REGION(irq) ((irq) >> IRQ_REGION_SHIFT)
#define IRQ_OFFSET(irq) ((irq) & ((1<<IRQ_REGION_SHIFT)-1))
#define IRQ_FROM_REGION(reg) ((reg) << IRQ_REGION_SHIFT)
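/* Worked example (illustrative): with IRQ_REGION_SHIFT == 5 (32-bit),
 * irq 37 decodes as region 1, offset 5, and re-encodes the same way:
 *   IRQ_REGION(37)          == 37 >> 5 == 1
 *   IRQ_OFFSET(37)          == 37 & 31 == 5
 *   IRQ_FROM_REGION(1) | 5  == 32 | 5  == 37
 */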
#define IRQ_REG_DIS 1 /* support disable_irq / enable_irq */
#define IRQ_REG_MASK 2 /* require IRQs to be masked */
#define EISA_IRQ_REGION 0 /* region 0 needs to be reserved for EISA */
#define EISA_MAX_IRQS 16 /* max. (E)ISA irq line */
struct irq_region_ops {
void (*disable_irq)(void *dev, int irq);
......@@ -54,8 +52,8 @@ struct irq_region_ops {
struct irq_region_data {
void *dev;
const char *name;
unsigned flags;
int irqbase;
unsigned int status[IRQ_PER_REGION]; /* IRQ status */
};
struct irq_region {
......@@ -69,21 +67,31 @@ extern struct irq_region *irq_region[NR_IRQ_REGS];
static __inline__ int irq_cannonicalize(int irq)
{
#ifdef CONFIG_EISA
return (irq == (IRQ_FROM_REGION(EISA_IRQ_REGION)+2)
? (IRQ_FROM_REGION(EISA_IRQ_REGION)+9) : irq);
#else
return irq;
#endif
}
extern void disable_irq(int);
#define disable_irq_nosync(i) disable_irq(i)
extern void enable_irq(int);
extern void do_irq(struct irqaction *a, int i, struct pt_regs *p);
extern void do_irq_mask(unsigned long mask, struct irq_region *region,
struct pt_regs *regs);
extern struct irq_region *alloc_irq_region(int count, struct irq_region_ops *ops,
unsigned long flags, const char *name, void *dev);
const char *name, void *dev);
extern int txn_alloc_irq(void);
extern int txn_claim_irq(int);
extern unsigned int txn_alloc_data(int, unsigned int);
extern unsigned long txn_alloc_addr(int);
#endif /* _ASM_IRQ_H */
/* soft power switch support (power.c) */
extern struct tasklet_struct power_tasklet;
#endif /* _ASM_PARISC_IRQ_H */
/*
* WARNING WARNING WARNING WARNING WARNING WARNING WARNING WARNING
* ---------------------------------------------------------------
* This file will be removed as soon as we have converted
* hp_psaux.c and hp_keyb.c to the input layer !
*
*/
/*
* linux/include/asm-parisc/keyboard.h
*
* Original by Geert Uytterhoeven
* updates by Alex deVries <adevries@thepuffingroup.com>
* portions copyright (1999) The Puffin Group
* mostly rewritten by Philipp Rumpf <prumpf@tux.org>,
* Copyright 2000 Philipp Rumpf
*/
/*
* We try to keep the amount of generic code as low as possible -
* we want to support all HIL, PS/2, and untranslated USB keyboards
*/
#ifndef _PARISC_KEYBOARD_H
#define _PARISC_KEYBOARD_H
#include <linux/config.h>
#ifdef __KERNEL__
#ifdef CONFIG_VT
#include <linux/kernel.h>
#include <linux/kd.h>
/* These are basically the generic functions / variables. The only
* unexpected detail is the initialization sequence for the keyboard
* driver is something like this:
*
* detect keyboard port
* detect keyboard
* call register_kbd_ops
* wait for init_hw
*
* only after init_hw has been called you're allowed to call
* handle_scancode. This means you either have to be extremely
* careful or use a global flag or something - I strongly suggest
* the latter. prumpf */
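/* Hedged sketch of that sequence (every name in the body is a placeholder,
 * not from this header): */
#if 0	/* example only */
static int example_kbd_probe(void)
{
	if (!example_detect_port() || !example_detect_keyboard())
		return -ENODEV;
	register_kbd_ops(&example_kbd_ops);	/* init_hw is called later */
	/* only after init_hw has run may handle_scancode() be called */
	return 0;
}
#endif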
extern struct kbd_ops {
int (*setkeycode)(unsigned int, unsigned int);
int (*getkeycode)(unsigned int);
int (*translate)(unsigned char, unsigned char *, char);
char (*unexpected_up)(unsigned char);
void (*leds)(unsigned char);
void (*init_hw)(void);
/* Keyboard driver resource allocation */
void (*kbd_request_region)(void);
int (*kbd_request_irq)(void (*handler)(int, void *, struct pt_regs *));
/* Methods to access the keyboard processor's I/O registers */
unsigned char (*kbd_read_input)(void);
void (*kbd_write_output)(unsigned char val);
void (*kbd_write_command)(unsigned char val);
unsigned char (*kbd_read_status)(void);
unsigned char sysrq_key;
unsigned char *sysrq_xlate;
} *kbd_ops;
#define kbd_setkeycode (*kbd_ops->setkeycode)
#define kbd_getkeycode (*kbd_ops->getkeycode)
#define kbd_translate (*kbd_ops->translate)
#define kbd_unexpected_up (*kbd_ops->unexpected_up)
#define kbd_leds (*kbd_ops->leds)
#define kbd_init_hw (*kbd_ops->init_hw)
#define SYSRQ_KEY (kbd_ops->sysrq_key)
#define kbd_sysrq_xlate (kbd_ops->sysrq_xlate)
/* Do the actual calls via kbd_ops vector */
#define kbd_request_region() kbd_ops->kbd_request_region()
#define kbd_request_irq(handler) kbd_ops->kbd_request_irq(handler)
#define kbd_read_input() kbd_ops->kbd_read_input()
#define kbd_write_output(val) kbd_ops->kbd_write_output(val)
#define kbd_write_command(val) kbd_ops->kbd_write_command(val)
#define kbd_read_status() kbd_ops->kbd_read_status()
extern unsigned char hp_ps2kbd_sysrq_xlate[128]; /* from drivers/char/hp_keyb.c */
extern void unregister_kbd_ops(void);
extern void register_kbd_ops(struct kbd_ops *ops);
#endif /* CONFIG_VT */
#endif /* __KERNEL__ */
#endif /* _PARISC_KEYBOARD_H */
#ifndef _ASM_KMAP_TYPES_H
#define _ASM_KMAP_TYPES_H
#include <linux/config.h>
#if CONFIG_DEBUG_HIGHMEM
# define D(n) __KM_FENCE_##n ,
#else
# define D(n)
#endif
enum km_type {
D(0) KM_BOUNCE_READ,
D(1) KM_SKB_SUNRPC_DATA,
D(2) KM_SKB_DATA_SOFTIRQ,
D(3) KM_USER0,
D(4) KM_USER1,
D(5) KM_BIO_SRC_IRQ,
D(6) KM_BIO_DST_IRQ,
D(7) KM_PTE0,
D(8) KM_PTE1,
D(9) KM_IRQ0,
D(10) KM_IRQ1,
D(11) KM_TYPE_NR
};
#undef D
#endif
#ifndef LED_H
#define LED_H
#define LED7 0x80 /* top (or furthest right) LED */
#define LED6 0x40
#define LED5 0x20
......@@ -16,18 +15,27 @@
#define LED_DISK_IO LED2 /* for disk activity */
#define LED_HEARTBEAT LED3 /* heartbeat */
/* values for pdc_chassis_lcd_info_ret_block.model: */
#define DISPLAY_MODEL_LCD 0 /* KittyHawk LED or LCD */
#define DISPLAY_MODEL_NONE 1 /* no LED or LCD */
#define DISPLAY_MODEL_LASI 2 /* LASI style 8 bit LED */
#define DISPLAY_MODEL_OLD_ASP 0x7F /* faked: ASP style 8 x 1 bit LED (only very old ASP versions) */
#define LED_CMD_REG_NONE NULL /* NULL == no addr for the cmd register */
/* irq function */
extern void led_interrupt_func(void);
/* led tasklet struct */
extern struct tasklet_struct led_tasklet;
/* LASI & ASP specific LED initialization funcs */
extern void __init lasi_led_init( unsigned long lasi_hpa );
extern void __init asp_led_init( unsigned long led_ptr );
/* register_led_driver() */
int __init register_led_driver( int model, char *cmd_reg, char *data_reg );
/* registers the LED regions for procfs */
extern void __init register_led_regions(void);
void __init register_led_regions(void);
/* writes a string to the LCD display (if possible on this h/w) */
int lcd_print(char *str);
/* main LED initialization function (uses the PDC) */
extern int __init led_init(void);
/* main LED initialization function (uses PDC) */
int __init led_init(void);
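/* Hedged usage sketch (calling context and return convention assumed,
 * not part of this header): */
#if 0	/* example only */
	if (led_init() == 0)	/* assumption: 0 indicates success */
		lcd_print("Linux");
#endif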
#endif /* LED_H */
......@@ -4,6 +4,7 @@
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define MAP_SHARED 0x01 /* Share changes */
......
/*
* parisc mmu structures
*/
#ifndef _PARISC_MMU_H_
#define _PARISC_MMU_H_
#ifndef __ASSEMBLY__
/* Default "unsigned long" context */
/* On parisc, we store the space id here */
typedef unsigned long mm_context_t;
/* Hardware Page Table Entry */
typedef struct _PTE {
unsigned long v:1; /* Entry is valid */
unsigned long tag:31; /* Unique Tag */
unsigned long r:1; /* referenced */
unsigned long os_1:1; /* */
unsigned long t:1; /* page reference trap */
unsigned long d:1; /* dirty */
unsigned long b:1; /* break */
unsigned long type:3; /* access type */
unsigned long pl1:2; /* PL1 (execute) */
unsigned long pl2:2; /* PL2 (write) */
unsigned long u:1; /* uncacheable */
unsigned long id:1; /* access id */
unsigned long os_2:1; /* */
unsigned long os_3:3; /* */
unsigned long res_1:4; /* */
unsigned long phys:20; /* physical page number */
unsigned long os_4:2; /* */
unsigned long res_2:3; /* */
unsigned long next; /* pointer to next page */
} PTE;
/*
* Simulated two-level MMU. This structure is used by the kernel
* to keep track of MMU mappings and is used to update/maintain
* the hardware HASH table which is really a cache of mappings.
*
* The simulated structures mimic the hardware available on other
* platforms, notably the 80x86 and 680x0.
*/
typedef struct _pte {
unsigned long page_num:20;
unsigned long flags:12; /* Page flags (some unused bits) */
} pte;
#define PD_SHIFT (10+12) /* Page directory */
#define PD_MASK 0x02FF
#define PT_SHIFT (12) /* Page Table */
#define PT_MASK 0x02FF
#define PG_SHIFT (12) /* Page Entry */
/* MMU context */
typedef struct _MMU_context {
long pid[4];
pte **pmap; /* Two-level page-map structure */
} MMU_context;
#endif /* __ASSEMBLY__ */
#endif /* _PARISC_MMU_H_ */
#ifndef __PARISC_MMU_CONTEXT_H
#define __PARISC_MMU_CONTEXT_H
#include <linux/mm.h>
#include <asm/atomic.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk, unsigned cpu)
{
}
......@@ -14,17 +19,10 @@ extern void free_sid(unsigned long);
static inline int
init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
/*
* Init_new_context can be called for a cloned mm, so we
* only allocate a space id if one hasn't been allocated
* yet AND mm != &init_mm (cloned kernel thread which
* will run in the kernel space with spaceid 0).
*/
if ((mm != &init_mm) && (mm->context == 0)) {
mm->context = alloc_sid();
}
if (atomic_read(&mm->mm_users) != 1)
BUG();
mm->context = alloc_sid();
return 0;
}
......@@ -35,15 +33,22 @@ destroy_context(struct mm_struct *mm)
mm->context = 0;
}
static inline void load_context(mm_context_t context)
{
mtsp(context, 3);
#if SPACEID_SHIFT == 0
mtctl(context << 1,8);
#else
mtctl(context >> (SPACEID_SHIFT - 1),8);
#endif
}
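/* Illustrative note (an assumption, not from the original source): the
 * space id is loaded both into sr3 via mtsp() and, shifted, into cr8 so
 * the protection id lines up; e.g. with SPACEID_SHIFT == 0 a context of
 * 0x123 is written to cr8 as 0x123 << 1 == 0x246. */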
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next, struct task_struct *tsk, unsigned cpu)
{
if (prev != next) {
/* Re-load page tables */
tsk->thread.pg_tables = __pa(next->pgd);
mtctl(tsk->thread.pg_tables, 25);
mtsp(next->context,3);
mtctl(__pa(next->pgd), 25);
load_context(next->context);
}
}
......
#ifndef _PARISC_MMZONE_H
#define _PARISC_MMZONE_H
struct node_map_data {
pg_data_t pg_data;
struct page *adj_node_mem_map;
};
extern struct node_map_data node_data[];
extern unsigned char *chunkmap;
#define BADCHUNK ((unsigned char)0xff)
#define CHUNKSZ (256*1024*1024)
#define CHUNKSHIFT 28
#define CHUNKMASK (~(CHUNKSZ - 1))
#define CHUNKNUM(paddr) ((paddr) >> CHUNKSHIFT)
#define NODE_DATA(nid) (&node_data[nid].pg_data)
#define NODE_MEM_MAP(nid) (NODE_DATA(nid)->node_mem_map)
#define ADJ_NODE_MEM_MAP(nid) (node_data[nid].adj_node_mem_map)
#define phys_to_page(paddr) \
(ADJ_NODE_MEM_MAP(chunkmap[CHUNKNUM((paddr))]) \
+ ((paddr) >> PAGE_SHIFT))
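/* Worked example (illustrative): CHUNKSZ is 256MB, so physical address
 * 0x12345678 falls in chunk CHUNKNUM(0x12345678) == 0x12345678 >> 28 == 1,
 * and CHUNKMASK rounds it down to a chunk base of 0x10000000. */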
#define virt_to_page(kvaddr) phys_to_page(__pa(kvaddr))
/* This is kind of bogus, need to investigate performance of doing it right */
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#endif /* !_PARISC_MMZONE_H */
#ifndef _ASM_PARISC_MODULE_H
#define _ASM_PARISC_MODULE_H
/*
* This file contains the parisc architecture specific module code.
*/
#define module_map(x) vmalloc(x)
#define module_unmap(x) vfree(x)
#define module_arch_init(x) (0)
#define arch_init_modules(x) do { } while (0)
#endif /* _ASM_PARISC_MODULE_H */
......@@ -13,11 +13,17 @@
struct msqid64_ds {
struct ipc64_perm msg_perm;
#ifndef __LP64__
unsigned int __pad1;
#endif
__kernel_time_t msg_stime; /* last msgsnd time */
#ifndef __LP64__
unsigned int __pad2;
#endif
__kernel_time_t msg_rtime; /* last msgrcv time */
#ifndef __LP64__
unsigned int __pad3;
#endif
__kernel_time_t msg_ctime; /* last change time */
unsigned int msg_cbytes; /* current number of bytes on queue */
unsigned int msg_qnum; /* number of messages in queue */
......
......@@ -9,11 +9,31 @@
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/cache.h>
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
#define copy_page(to,from) memcpy((void *)(to), (void *)(from), PAGE_SIZE)
#define copy_page(to,from) copy_user_page_asm((void *)(to), (void *)(from))
struct page;
#define clear_user_page(page, vaddr) clear_page(page)
#define copy_user_page(to, from, vaddr) copy_page(to, from)
extern void purge_kernel_dcache_page(unsigned long);
extern void copy_user_page_asm(void *to, void *from);
extern void clear_user_page_asm(void *page, unsigned long vaddr);
static inline void
copy_user_page(void *vto, void *vfrom, unsigned long vaddr, struct page *pg)
{
copy_user_page_asm(vto, vfrom);
flush_kernel_dcache_page(vto);
/* XXX: ppc flushes icache too, should we? */
}
static inline void
clear_user_page(void *page, unsigned long vaddr, struct page *pg)
{
purge_kernel_dcache_page((unsigned long)page);
clear_user_page_asm(page, vaddr);
}
/*
* These are used to make use of C type-checking..
......@@ -47,6 +67,20 @@ extern __inline__ int get_order(unsigned long size)
return order;
}
#ifdef __LP64__
#define MAX_PHYSMEM_RANGES 8 /* Fix the size for now (current known max is 3) */
#else
#define MAX_PHYSMEM_RANGES 1 /* First range is only range that fits in 32 bits */
#endif
typedef struct __physmem_range {
unsigned long start_pfn;
unsigned long pages; /* PAGE_SIZE pages */
} physmem_range_t;
extern physmem_range_t pmem_ranges[];
extern int npmem_ranges;
#endif /* !__ASSEMBLY__ */
/* to align the pointer to the (next) page boundary */
......@@ -68,7 +102,7 @@ extern __inline__ int get_order(unsigned long size)
#define LINUX_GATEWAY_SPACE 0
#define __PAGE_OFFSET (0xc0000000)
#define __PAGE_OFFSET (0x10000000)
#define PAGE_OFFSET ((unsigned long)__PAGE_OFFSET)
/* These macros don't work for 64-bit C code -- don't allow in C at all */
......@@ -78,8 +112,16 @@ extern __inline__ int get_order(unsigned long size)
#endif
#define __pa(x) ((unsigned long)(x)-PAGE_OFFSET)
#define __va(x) ((void *)((unsigned long)(x)+PAGE_OFFSET))
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#ifndef CONFIG_DISCONTIGMEM
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#endif /* !CONFIG_DISCONTIGMEM */
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
#ifndef _ASMPARISC_PARAM_H
#define _ASMPARISC_PARAM_H
#ifdef __KERNEL__
# ifdef CONFIG_PA20
# define HZ 1000 /* Faster machines */
# else
# define HZ 100 /* Internal kernel timer frequency */
# endif
# define USER_HZ 100 /* .. some user interfaces are in "ticks" */
# define CLOCKS_PER_SEC (USER_HZ) /* like times() */
#endif
#ifndef HZ
#define HZ 100
#endif
......@@ -17,8 +27,4 @@
#define MAXHOSTNAMELEN 64 /* max length of hostname */
#ifdef __KERNEL__
# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
#endif
#endif
......@@ -3,9 +3,6 @@
#include <asm/scatterlist.h>
#define MIN_PCI_PORT 0x000000
#define MAX_PCI_PORT 0xffffff
/*
** HP PCI platforms generally support multiple bus adapters.
** (workstations 1-~4, servers 2-~32)
......@@ -19,7 +16,7 @@
#define PCI_MAX_BUSSES 256
/* [soapbox on]
** Who the hell can develope stuff without ASSERT or VASSERT?
** Who the hell can develop stuff without ASSERT or VASSERT?
** No one understands all the modules across all platforms.
** For linux add another dimension - processor architectures.
**
......@@ -49,18 +46,40 @@
** Data needed by pcibios layer belongs here.
*/
struct pci_hba_data {
struct pci_hba_data *next; /* global chain of HBAs */
char *base_addr; /* aka Host Physical Address */
struct hp_device *iodc_info; /* Info from PA bus walk */
unsigned long base_addr; /* aka Host Physical Address */
const struct parisc_device *dev; /* device from PA bus walk */
struct pci_bus *hba_bus; /* primary PCI bus below HBA */
int hba_num; /* I/O port space access "key" */
struct resource bus_num; /* PCI bus numbers */
struct resource io_space; /* PIOP */
struct resource mem_space; /* LMMIO */
unsigned long mem_space_offset; /* VCLASS support */
struct resource lmmio_space; /* bus addresses < 4Gb */
struct resource elmmio_space; /* additional bus addresses < 4Gb */
unsigned long lmmio_space_offset; /* CPU view - PCI view */
void * iommu; /* IOMMU this device is under */
/* REVISIT - spinlock to protect resources? */
};
#define HBA_DATA(d) ((struct pci_hba_data *) (d))
/*
** We support 2^16 I/O ports per HBA. These are set up in the form
** 0xbbxxxx, where bb is the bus number and xxxx is the I/O port
** space address.
*/
#define HBA_PORT_SPACE_BITS 16
#define HBA_PORT_BASE(h) ((h) << HBA_PORT_SPACE_BITS)
#define HBA_PORT_SPACE_SIZE (1UL << HBA_PORT_SPACE_BITS)
#define PCI_PORT_HBA(a) ((a) >> HBA_PORT_SPACE_BITS)
#define PCI_PORT_ADDR(a) ((a) & (HBA_PORT_SPACE_SIZE - 1))
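/* Worked example (illustrative): port 0x2f8 behind HBA 3 encodes as I/O
 * address 0x302f8 (the 0xbbxxxx form described above) and decodes back:
 *   HBA_PORT_BASE(3) + 0x2f8 == 0x302f8
 *   PCI_PORT_HBA(0x302f8)    == 3
 *   PCI_PORT_ADDR(0x302f8)   == 0x2f8
 */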
/*
** Convert between PCI (IO_VIEW) addresses and processor (PA_VIEW) addresses.
** Note that we currently support only LMMIO.
*/
#define PCI_BUS_ADDR(hba,a) ((a) - hba->lmmio_space_offset)
#define PCI_HOST_ADDR(hba,a) ((a) + hba->lmmio_space_offset)
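/* Illustrative example (offset value assumed): with lmmio_space_offset ==
 * 0xf0000000, processor (PA_VIEW) address 0xf1000000 maps to PCI (IO_VIEW)
 * address PCI_BUS_ADDR(hba, 0xf1000000) == 0x01000000, and PCI_HOST_ADDR()
 * performs the inverse translation. */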
/*
** KLUGE: linux/pci.h includes asm/pci.h BEFORE declaring struct pci_bus
......@@ -69,6 +88,12 @@ struct pci_hba_data {
struct pci_bus;
struct pci_dev;
/* The PCI address space does equal the physical memory
* address space. The networking and block device layers use
* this boolean for bounce buffer decisions.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
/*
** Most PCI devices (eg Tulip, NCR720) also export the same registers
** to both MMIO and I/O port space. Due to poor performance of I/O Port
......@@ -106,9 +131,6 @@ struct pci_bios_ops {
void (*fixup_bus)(struct pci_bus *bus);
};
extern void pcibios_size_bridge(struct pci_bus *, struct pbus_set_ranges_data *);
/*
** See Documentation/DMA-mapping.txt
*/
......@@ -127,8 +149,8 @@ struct pci_dma_ops {
/*
** We could live without the hppa_dma_ops indirection if we didn't want
** to support 4 different dma models with one binary or they were
** all loadable modules:
** to support 4 different coherent dma models with one binary (they will
** someday be loadable modules):
** I/O MMU consistent method dma_sync behavior
** ============= ====================== =======================
** a) PA-7x00LC uncachable host memory flush/purge
......@@ -144,8 +166,11 @@ struct pci_dma_ops {
*/
extern struct pci_dma_ops *hppa_dma_ops;
#ifdef CONFIG_PA11
extern struct pci_dma_ops pcxl_dma_ops;
extern struct pci_dma_ops pcx_dma_ops;
#endif
/*
** Oops hard if we haven't setup hppa_dma_ops by the time the first driver
......@@ -155,7 +180,9 @@ extern struct pci_dma_ops pcx_dma_ops;
*/
static inline int pci_dma_panic(char *msg)
{
extern void panic(const char *, ...); /* linux/kernel.h */
panic(msg);
/* NOTREACHED */
return -1;
}
......@@ -196,16 +223,32 @@ static inline int pci_dma_panic(char *msg)
hppa_dma_ops->dma_sync_sg(p, sg, n, d); \
}
/* No highmem on parisc, plus we have an IOMMU, so mapping pages is easy. */
#define pci_map_page(dev, page, off, size, dir) \
pci_map_single(dev, (page_address(page) + (off)), size, dir)
#define pci_unmap_page(dev,addr,sz,dir) pci_unmap_single(dev,addr,sz,dir)
/* Don't support DAC yet. */
#define pci_dac_dma_supported(pci_dev, mask) (0)
/*
** Stuff declared in arch/parisc/kernel/pci.c
*/
extern struct pci_port_ops *pci_port;
extern struct pci_bios_ops *pci_bios;
extern int pci_post_reset_delay; /* delay after de-asserting #RESET */
extern int pci_hba_count;
extern struct pci_hba_data *parisc_pci_hba[];
#ifdef CONFIG_PCI
extern void pcibios_register_hba(struct pci_hba_data *);
extern void pcibios_set_master(struct pci_dev *);
extern void pcibios_assign_unassigned_resources(struct pci_bus *);
#else
extern inline void pcibios_register_hba(struct pci_hba_data *x)
{
}
#endif
/*
** used by drivers/pci/pci.c:pci_do_scan_bus()
......@@ -216,12 +259,7 @@ extern void pcibios_assign_unassigned_resources(struct pci_bus *);
** To date, only alpha sets this to one. We'll need to set this
** to zero for legacy platforms and one for PAT platforms.
*/
#ifdef __LP64__
extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
#define pcibios_assign_all_busses() pdc_pat
#else
#define pcibios_assign_all_busses() 0
#endif
#define pcibios_assign_all_busses() (pdc_type == PDC_TYPE_PAT)
#define PCIBIOS_MIN_IO 0x10
#define PCIBIOS_MIN_MEM 0x1000 /* NBPG - but pci/setup-res.c dies */
......@@ -229,4 +267,32 @@ extern int pdc_pat; /* arch/parisc/kernel/inventory.c */
/* Return the index of the PCI controller for device PDEV. */
#define pci_controller_num(PDEV) (0)
#define GET_IOC(dev) ((struct ioc *)(HBA_DATA(dev->sysdata)->iommu))
#ifdef CONFIG_IOMMU_CCIO
struct parisc_device;
struct ioc;
void * ccio_get_iommu(const struct parisc_device *dev);
struct pci_dev * ccio_get_fake(const struct parisc_device *dev);
int ccio_request_resource(const struct parisc_device *dev,
struct resource *res);
int ccio_allocate_resource(const struct parisc_device *dev,
struct resource *res, unsigned long size,
unsigned long min, unsigned long max, unsigned long align,
void (*alignf)(void *, struct resource *, unsigned long, unsigned long),
void *alignf_data);
#else /* !CONFIG_IOMMU_CCIO */
#define ccio_get_iommu(dev) NULL
#define ccio_get_fake(dev) NULL
#define ccio_request_resource(dev, res) request_resource(&iomem_resource, res)
#define ccio_allocate_resource(dev, res, size, min, max, align, alignf, data) \
allocate_resource(&iomem_resource, res, size, min, max, \
align, alignf, data)
#endif /* !CONFIG_IOMMU_CCIO */
#ifdef CONFIG_IOMMU_SBA
struct parisc_device;
void * sba_get_iommu(struct parisc_device *dev);
#endif
#endif /* __ASM_PARISC_PCI_H */
......@@ -2,8 +2,27 @@
#define _PARISC_PDC_H
/*
PDC entry points...
*/
* PDC return values ...
* All PDC calls return a subset of these errors.
*/
#define PDC_WARN 3 /* Call completed with a warning */
#define PDC_REQ_ERR_1 2 /* See above */
#define PDC_REQ_ERR_0 1 /* Call would generate a requestor error */
#define PDC_OK 0 /* Call completed successfully */
#define PDC_BAD_PROC -1 /* Called non-existent procedure */
#define PDC_BAD_OPTION -2 /* Called with non-existent option */
#define PDC_ERROR -3 /* Call could not complete without an error */
#define PDC_NE_MOD -5 /* Module not found */
#define PDC_NE_CELL_MOD -7 /* Cell module not found */
#define PDC_INVALID_ARG -10 /* Called with an invalid argument */
#define PDC_BUS_POW_WARN -12 /* Call could not complete in allowed power budget */
#define PDC_NOT_NARROW -17 /* Narrow mode not supported */
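/* Illustrative sketch (mem_pdc_call() is used as a placeholder name and
 * its exact signature is an assumption): callers treat negative codes as
 * failure and PDC_OK/PDC_WARN as usable results. */
#if 0	/* example only */
	long ret = mem_pdc_call(PDC_TOD, PDC_TOD_READ, __pa(&tod_data));
	if (ret < PDC_OK)
		printk("PDC TOD read failed: %ld\n", ret);
#endif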
/*
* PDC entry points...
*/
#define PDC_POW_FAIL 1 /* perform a power-fail */
#define PDC_POW_FAIL_PREPARE 0 /* prepare for powerfail */
......@@ -12,7 +31,7 @@
#define PDC_CHASSIS_DISP 0 /* update chassis display */
#define PDC_CHASSIS_WARN 1 /* return chassis warnings */
#define PDC_CHASSIS_DISPWARN 2 /* update&return chassis status */
#define PDC_RETURN_CHASSIS_INFO 128 /* HVERSION dependend: return chassis LED/LCD info */
#define PDC_RETURN_CHASSIS_INFO 128 /* HVERSION dependent: return chassis LED/LCD info */
#define PDC_PIM 3 /* Get PIM data */
#define PDC_PIM_HPMC 0 /* Transfer HPMC data */
......@@ -26,52 +45,74 @@
#define PDC_MODEL_BOOTID 1 /* set the BOOT_ID */
#define PDC_MODEL_VERSIONS 2 /* returns cpu-internal versions*/
#define PDC_MODEL_SYSMODEL 3 /* return system model info */
#define PDC_MODEL_ENSPEC 4 /* ??? */
#define PDC_MODEL_DISPEC 5 /* ??? */
#define PDC_MODEL_ENSPEC 4 /* enable specific option */
#define PDC_MODEL_DISPEC 5 /* disable specific option */
#define PDC_MODEL_CPU_ID 6 /* returns cpu-id (only newer machines!) */
#define PDC_MODEL_CAPABILITIES 7 /* returns OS32/OS64-flags */
#define PDC_MODEL_GET_BOOT__OP 8 /* returns boot test options */
#define PDC_MODEL_SET_BOOT__OP 9 /* set boot test options */
#define PA89_INSTRUCTION_SET 0x4 /* capabilities returned */
#define PA90_INSTRUCTION_SET 0x8
#define PDC_CACHE 5 /* return/set cache (& TLB) info*/
#define PDC_CACHE_INFO 0 /* returns information */
#define PDC_CACHE_SET_COH 1 /* set coherence state */
#define PDC_CACHE_RET_SPID 2 /* returns space-ID bits */
#define PDC_HPA 6 /* return HPA of processor */
#define PDC_HPA_PROCESSOR 0
#define PDC_HPA_MODULES 1
#define PDC_HPA 6 /* return HPA of processor */
#define PDC_HPA_PROCESSOR 0
#define PDC_HPA_MODULES 1
#define PDC_IODC 8 /* talk to IODC */
#define PDC_IODC_READ 0 /* read IODC entry point */
/* PDC_IODC_RI_* INDEX parameter of PDC_IODC_READ */
#define PDC_IODC_RI_DATA_BYTES 0 /* IODC Data Bytes */
/* 1, 2 obsolete - HVERSION dependent */
#define PDC_IODC_RI_INIT 3 /* Initialize module */
#define PDC_COPROC 7 /* Co-Processor (usually FP unit(s)) */
#define PDC_COPROC_CFG 0 /* Co-Processor Cfg (FP unit(s) enabled?) */
#define PDC_IODC 8 /* talk to IODC */
#define PDC_IODC_READ 0 /* read IODC entry point */
/* PDC_IODC_RI_ * INDEX parameter of PDC_IODC_READ */
#define PDC_IODC_RI_DATA_BYTES 0 /* IODC Data Bytes */
/* 1, 2 obsolete - HVERSION dependent*/
#define PDC_IODC_RI_INIT 3 /* Initialize module */
#define PDC_IODC_RI_IO 4 /* Module input/output */
#define PDC_IODC_RI_SPA 5 /* Module input/output */
#define PDC_IODC_RI_CONFIG 6 /* Module input/output */
/* 7 obsolete - HVERSION dependent */
/* 7 obsolete - HVERSION dependent */
#define PDC_IODC_RI_TEST 8 /* Module input/output */
#define PDC_IODC_RI_TLB 9 /* Module input/output */
#define PDC_IODC_NINIT 2 /* non-destructive init */
#define PDC_IODC_DINIT 3 /* destructive init */
#define PDC_IODC_MEMERR 4 /* check for memory errors */
#define PDC_IODC_INDEX_DATA 0 /* get first 16 bytes from mod IODC */
#define PDC_IODC_BUS_ERROR -4 /* bus error return value */
#define PDC_IODC_INVALID_INDEX -5 /* invalid index return value */
#define PDC_IODC_COUNT -6 /* count is too small */
#define PDC_TOD 9 /* time-of-day clock (TOD) */
#define PDC_TOD_READ 0 /* read TOD */
#define PDC_TOD_WRITE 1 /* write TOD */
#define PDC_TOD_ITIMER 2 /* calibrate Interval Timer (CR16) */
#define PDC_ADD_VALID 12 /* Memory validation PDC call */
#define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */
#define PDC_IODC_NINIT 2 /* non-destructive init */
#define PDC_IODC_DINIT 3 /* destructive init */
#define PDC_IODC_MEMERR 4 /* check for memory errors */
#define PDC_IODC_INDEX_DATA 0 /* get first 16 bytes from mod IODC */
#define PDC_IODC_BUS_ERROR -4 /* bus error return value */
#define PDC_IODC_INVALID_INDEX -5 /* invalid index return value */
#define PDC_IODC_COUNT -6 /* count is too small */
#define PDC_TOD 9 /* time-of-day clock (TOD) */
#define PDC_TOD_READ 0 /* read TOD */
#define PDC_TOD_WRITE 1 /* write TOD */
#define PDC_TOD_ITIMER 2 /* calibrate Interval Timer (CR16) */
#define PDC_STABLE 10 /* stable storage (sprockets) */
#define PDC_STABLE_READ 0
#define PDC_STABLE_WRITE 1
#define PDC_STABLE_RETURN_SIZE 2
#define PDC_STABLE_VERIFY_CONTENTS 3
#define PDC_STABLE_INITIALIZE 4
#define PDC_NVOLATILE 11 /* often not implemented */
#define PDC_ADD_VALID 12 /* Memory validation PDC call */
#define PDC_ADD_VALID_VERIFY 0 /* Make PDC_ADD_VALID verify region */
#define PDC_INSTR 15 /* get instr to invoke PDCE_CHECK() */
#define PDC_PROC 16 /* (sprockets) */
#define PDC_CONFIG 16 /* (sprockets) */
#define PDC_CONFIG_DECONFIG 0
#define PDC_CONFIG_DRECONFIG 1
#define PDC_CONFIG_DRETURN_CONFIG 2
#define PDC_BLOCK_TLB 18 /* manage hardware block-TLB */
#define PDC_BTLB_INFO 0 /* returns parameter */
#define PDC_BTLB_INSERT 1 /* insert BTLB entry */
......@@ -82,92 +123,373 @@
#define PDC_TLB_INFO 0 /* returns parameter */
#define PDC_TLB_SETUP 1 /* set up miss handling */
#define PDC_SYSTEM_MAP 22 /* find system modules */
#define PDC_MEM 20 /* Manage memory */
#define PDC_MEM_MEMINFO 0
#define PDC_MEM_ADD_PAGE 1
#define PDC_MEM_CLEAR_PDT 2
#define PDC_MEM_READ_PDT 3
#define PDC_MEM_RESET_CLEAR 4
#define PDC_MEM_GOODMEM 5
#define PDC_MEM_TABLE 128 /* Non contig mem map (sprockets) */
#define PDC_MEM_RETURN_ADDRESS_TABLE PDC_MEM_TABLE
#define PDC_MEM_GET_MEMORY_SYSTEM_TABLES_SIZE 131
#define PDC_MEM_GET_MEMORY_SYSTEM_TABLES 132
#define PDC_MEM_GET_PHYSICAL_LOCATION_FROM_MEMORY_ADDRESS 133
#define PDC_MEM_RET_SBE_REPLACED 5 /* PDC_MEM return values */
#define PDC_MEM_RET_DUPLICATE_ENTRY 4
#define PDC_MEM_RET_BUF_SIZE_SMALL 1
#define PDC_MEM_RET_PDT_FULL -11
#define PDC_MEM_RET_INVALID_PHYSICAL_LOCATION ~0ULL
#ifndef __ASSEMBLY__
typedef struct {
unsigned long long baseAddr;
unsigned int pages;
unsigned int reserved;
} MemAddrTable_t;
#endif
#define PDC_PSW 21 /* Get/Set default System Mask */
#define PDC_PSW_MASK 0 /* Return mask */
#define PDC_PSW_GET_DEFAULTS 1 /* Return defaults */
#define PDC_PSW_SET_DEFAULTS 2 /* Set default */
#define PDC_PSW_ENDIAN_BIT 1 /* set for big endian */
#define PDC_PSW_WIDE_BIT 2 /* set for wide mode */
#define PDC_SYSTEM_MAP 22 /* find system modules */
#define PDC_FIND_MODULE 0
#define PDC_FIND_ADDRESS 1
#define PDC_TRANSLATE_PATH 2
#define PDC_SOFT_POWER 23 /* soft power switch */
#define PDC_SOFT_POWER_INFO 0 /* return info about the soft power switch */
#define PDC_SOFT_POWER_ENABLE 1 /* enable/disable soft power switch */
/* HVERSION dependent */
/* The PDC_MEM_MAP calls */
#define PDC_MEM_MAP 128 /* on s700: return page info */
#define PDC_MEM_MAP_HPA 0 /* returns hpa of a module */
#define PDC_EEPROM 129 /* EEPROM access */
#define PDC_EEPROM_READ_WORD 0
#define PDC_EEPROM_WRITE_WORD 1
#define PDC_EEPROM_READ_BYTE 2
#define PDC_EEPROM_WRITE_BYTE 3
#define PDC_EEPROM_EEPROM_PASSWORD -1000
#define PDC_NVM 130 /* NVM (non-volatile memory) access */
#define PDC_NVM_READ_WORD 0
#define PDC_NVM_WRITE_WORD 1
#define PDC_NVM_READ_BYTE 2
#define PDC_NVM_WRITE_BYTE 3
#define PDC_SEED_ERROR 132 /* (sprockets) */
#define PDC_IO 135 /* log error info, reset IO system */
#define PDC_IO_READ_AND_CLEAR_ERRORS 0
#define PDC_IO_READ_AND_LOG_ERRORS 1
#define PDC_IO_SUSPEND_USB 2
/* sets bits 6&7 (little endian) of the HcControl Register */
#define PDC_IO_USB_SUSPEND 0xC000000000000000
#define PDC_IO_EEPROM_IO_ERR_TABLE_FULL -5 /* return value */
#define PDC_IO_NO_SUSPEND -6 /* return value */
#define PDC_BROADCAST_RESET 136 /* reset all processors */
#define PDC_DO_RESET 0UL /* option: perform a broadcast reset */
#define PDC_DO_FIRM_TEST_RESET 1UL /* Do broadcast reset with bitmap */
#define PDC_BR_RECONFIGURATION 2UL /* reset w/reconfiguration */
#define PDC_FIRM_TEST_MAGIC 0xab9ec36fUL /* for this reboot only */
#define PDC_LAN_STATION_ID 138 /* Hversion dependent mechanism for */
#define PDC_LAN_STATION_ID_READ 0 /* getting the lan station address */
#define PDC_LAN_STATION_ID_SIZE 6
#define PDC_CHECK_RANGES 139 /* (sprockets) */
#define PDC_NV_SECTIONS 141 /* (sprockets) */
/* generic error codes returned by all PDC-functions */
#define PDC_WARN 3 /* Call completed with a warning */
#define PDC_REQ_ERR_1 2 /* See above */
#define PDC_REQ_ERR_0 1 /* Call would generate a requestor error */
#define PDC_OK 0 /* Call completed successfully */
#define PDC_BAD_PROC -1 /* Called non-existent procedure */
#define PDC_BAD_OPTION -2 /* Called with non-existent option */
#define PDC_ERROR -3 /* Call could not complete without an error */
#define PDC_INVALID_ARG -10 /* Called with an invalid argument */
#define PDC_BUS_POW_WARN -12 /* Call could not complete in allowed power budget */
/* The following are from the HPUX .h files, and are just for
compatibility */
#define PDC_RET_OK 0L /* Call completed successfully */
#define PDC_RET_NE_PROC -1L /* Non-existent procedure */
#define PDC_RET_NE_OPT -2L /* non-existent option - arg1 */
#define PDC_RET_NE_MOD -5L /* Module not found */
#define PDC_RET_NE_CELL_MOD -7L /* Cell module not found */
#define PDC_RET_INV_ARG -10L /* Invalid argument */
#define PDC_RET_NOT_NARROW -17L /* Narrow mode not supported */
/* Error codes for PDC_ADD_VALID */
#define PDC_ADD_VALID_WARN 3 /* Call completed with a warning */
#define PDC_ADD_VALID_REQ_ERR_1 2 /* See above */
#define PDC_ADD_VALID_REQ_ERR_0 1 /* Call would generate a requestor error */
#define PDC_ADD_VALID_OK 0 /* Call completed successfully */
#define PDC_ADD_VALID_BAD_OPTION -2 /* Called with non-existent option */
#define PDC_ADD_VALID_ERROR -3 /* Call could not complete without an error */
#define PDC_ADD_VALID_INVALID_ARG -10 /* Called with an invalid argument */
#define PDC_ADD_VALID_BUS_POW_WARN -12 /* Call could not complete in allowed power budget */
#define PDC_PERFORMANCE 142 /* performance monitoring */
#define PDC_SYSTEM_INFO 143 /* system information */
#define PDC_SYSINFO_RETURN_INFO_SIZE 0
#define PDC_SYSINFO_RRETURN_SYS_INFO 1
#define PDC_SYSINFO_RRETURN_ERRORS 2
#define PDC_SYSINFO_RRETURN_WARNINGS 3
#define PDC_SYSINFO_RETURN_REVISIONS 4
#define PDC_SYSINFO_RRETURN_DIAGNOSE 5
#define PDC_SYSINFO_RRETURN_HV_DIAGNOSE 1005
#define PDC_RDR 144 /* (sprockets) */
#define PDC_RDR_READ_BUFFER 0
#define PDC_RDR_READ_SINGLE 1
#define PDC_RDR_WRITE_SINGLE 2
#define PDC_INTRIGUE 145 /* (sprockets) */
#define PDC_INTRIGUE_WRITE_BUFFER 0
#define PDC_INTRIGUE_GET_SCRATCH_BUFSIZE 1
#define PDC_INTRIGUE_START_CPU_COUNTERS 2
#define PDC_INTRIGUE_STOP_CPU_COUNTERS 3
#define PDC_STI 146 /* STI access */
/* same as PDC_PCI_XXX values (see below) */
/* Legacy PDC definitions for same stuff */
#define PDC_PCI_INDEX 147
#define PDC_PCI_INTERFACE_INFO 0
#define PDC_PCI_SLOT_INFO 1
#define PDC_PCI_INFLIGHT_BYTES 2
#define PDC_PCI_READ_CONFIG 3
#define PDC_PCI_WRITE_CONFIG 4
#define PDC_PCI_READ_PCI_IO 5
#define PDC_PCI_WRITE_PCI_IO 6
#define PDC_PCI_READ_CONFIG_DELAY 7
#define PDC_PCI_UPDATE_CONFIG_DELAY 8
#define PDC_PCI_PCI_PATH_TO_PCI_HPA 9
#define PDC_PCI_PCI_HPA_TO_PCI_PATH 10
#define PDC_PCI_PCI_PATH_TO_PCI_BUS 11
#define PDC_PCI_PCI_RESERVED 12
#define PDC_PCI_PCI_INT_ROUTE_SIZE 13
#define PDC_PCI_GET_INT_TBL_SIZE PDC_PCI_PCI_INT_ROUTE_SIZE
#define PDC_PCI_PCI_INT_ROUTE 14
#define PDC_PCI_GET_INT_TBL PDC_PCI_PCI_INT_ROUTE
#define PDC_PCI_READ_MON_TYPE 15
#define PDC_PCI_WRITE_MON_TYPE 16
/* Get SCSI Interface Card info: SDTR, SCSI ID, mode (SE vs LVD) */
#define PDC_INITIATOR 163
#define PDC_GET_INITIATOR 0
#define PDC_SET_INITIATOR 1
#define PDC_DELETE_INITIATOR 2
#define PDC_RETURN_TABLE_SIZE 3
#define PDC_RETURN_TABLE 4
#define PDC_LINK 165 /* (sprockets) */
#define PDC_LINK_PCI_ENTRY_POINTS 0 /* list (Arg1) = 0 */
#define PDC_LINK_USB_ENTRY_POINTS 1 /* list (Arg1) = 1 */
#define PDC_MEM_MAP 128
#define PDC_MEM_MAP_HPA 0
/* constants for OS (NVM...) */
#define OS_ID_NONE 0 /* Undefined OS ID */
#define OS_ID_HPUX 1 /* HP-UX OS */
#define OS_ID_LINUX OS_ID_HPUX /* just use the same value as hpux */
#define OS_ID_MPEXL 2 /* MPE XL OS */
#define OS_ID_OSF 3 /* OSF OS */
#define OS_ID_HPRT 4 /* HP-RT OS */
#define OS_ID_NOVEL 5 /* NOVELL OS */
#define OS_ID_NT 6 /* NT OS */
/* constants for PDC_CHASSIS */
#define OSTAT_OFF 0
#define OSTAT_FLT 1
#define OSTAT_TEST 2
#define OSTAT_INIT 3
#define OSTAT_SHUT 4
#define OSTAT_WARN 5
#define OSTAT_RUN 6
#define OSTAT_ON 7
#ifdef __LP64__
/* PDC PAT CELL */
#define PDC_PAT_CELL 64L /* Interface for gaining and
* manipulating cell state within PD */
#define PDC_PAT_CELL_GET_NUMBER 0L /* Return Cell number */
#define PDC_PAT_CELL_GET_INFO 1L /* Returns info about Cell */
#define PDC_PAT_CELL_MODULE 2L /* Returns info about Module */
#define PDC_PAT_CELL_SET_ATTENTION 9L /* Set Cell Attention indicator */
#define PDC_PAT_CELL_NUMBER_TO_LOC 10L /* Cell Number -> Location */
#define PDC_PAT_CELL_WALK_FABRIC 11L /* Walk the Fabric */
#define PDC_PAT_CELL_GET_RDT_SIZE 12L /* Return Route Distance Table Sizes */
#define PDC_PAT_CELL_GET_RDT 13L /* Return Route Distance Tables */
#define PDC_PAT_CELL_GET_LOCAL_PDH_SZ 14L /* Read Local PDH Buffer Size*/
#define PDC_PAT_CELL_SET_LOCAL_PDH 15L /* Write Local PDH Buffer */
#define PDC_PAT_CELL_GET_REMOTE_PDH_SZ 16L /* Return Remote PDH Buffer Size */
#define PDC_PAT_CELL_GET_REMOTE_PDH 17L /* Read Remote PDH Buffer */
#define PDC_PAT_CELL_GET_DBG_INFO 128L /* Return DBG Buffer Info */
#define PDC_PAT_CELL_CHANGE_ALIAS 129L /* Change Non-Equivalent Alias Checking */
/*
** Arg to PDC_PAT_CELL_MODULE memaddr[4]
**
** Addresses on the Merced Bus != all Runway Bus addresses.
** This is intended for programming SBA/LBA chips range registers.
*/
#define IO_VIEW 0UL
#define PA_VIEW 1UL
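#if 0
/* Illustrative sketch only (not part of the original header): how a
 * caller might ask for a module's address ranges as the processor sees
 * them (PA_VIEW) when programming SBA/LBA range registers.  Assumes the
 * pdc_pat_cell_module() wrapper and pdc_pat_cell_mod_maddr_block_t
 * declared further down in this file; error handling is elided.
 */
static void example_read_module_ranges(unsigned long ploc, unsigned long mod)
{
	pdc_pat_cell_mod_maddr_block_t block;
	unsigned long actcnt;

	/* PA_VIEW: addresses as seen from the Runway/processor side */
	pdc_pat_cell_module(&actcnt, ploc, mod, PA_VIEW, &block);
	/* block.cba and block.mod_info now describe the module */
}
#endif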
/* PDC_PAT_CELL_MODULE entity type values */
#define PAT_ENTITY_CA 0 /* central agent */
#define PAT_ENTITY_PROC 1 /* processor */
#define PAT_ENTITY_MEM 2 /* memory controller */
#define PAT_ENTITY_SBA 3 /* system bus adapter */
#define PAT_ENTITY_LBA 4 /* local bus adapter */
#define PAT_ENTITY_PBC 5 /* processor bus converter */
#define PAT_ENTITY_XBC 6 /* crossbar fabric connect */
#define PAT_ENTITY_RC 7 /* fabric interconnect */
/* PDC_PAT_CELL_MODULE address range type values */
#define PAT_PBNUM 0 /* PCI Bus Number */
#define PAT_LMMIO 1 /* < 4G MMIO Space */
#define PAT_GMMIO 2 /* > 4G MMIO Space */
#define PAT_NPIOP 3 /* Non Postable I/O Port Space */
#define PAT_PIOP 4 /* Postable I/O Port Space */
#define PAT_AHPA 5 /* Additional HPA Space */
#define PAT_UFO 6 /* HPA Space (UFO for Mariposa) */
#define PAT_GNIP 7 /* GNI Reserved Space */
/* PDC PAT CHASSIS LOG */
#define PDC_PAT_CHASSIS_LOG 65L /* Platform logging & forward
** progress functions */
#define PDC_PAT_CHASSIS_WRITE_LOG 0L /* Write Log Entry */
#define PDC_PAT_CHASSIS_READ_LOG 1L /* Read Log Entry */
/* PDC PAT CPU */
#define PDC_PAT_CPU 67L /* Interface to CPU configuration
* within the protection domain */
#define PDC_PAT_CPU_INFO 0L /* Return CPU config info */
#define PDC_PAT_CPU_DELETE 1L /* Delete CPU */
#define PDC_PAT_CPU_ADD 2L /* Add CPU */
#define PDC_PAT_CPU_GET_NUMBER 3L /* Return CPU Number */
#define PDC_PAT_CPU_GET_HPA 4L /* Return CPU HPA */
#define PDC_PAT_CPU_STOP 5L /* Stop CPU */
#define PDC_PAT_CPU_RENDEZVOUS 6L /* Rendezvous CPU */
#define PDC_PAT_CPU_GET_CLOCK_INFO 7L /* Return CPU Clock info */
#define PDC_PAT_CPU_GET_RENDEZVOUS_STATE 8L /* Return Rendezvous State */
#define PDC_PAT_CPU_PLUNGE_FABRIC 128L /* Plunge Fabric */
#define PDC_PAT_CPU_UPDATE_CACHE_CLEANSING 129L /* Manipulate Cache
* Cleansing Mode */
/* PDC PAT EVENT */
#define PDC_PAT_EVENT 68L /* Interface to Platform Events */
#define PDC_PAT_EVENT_GET_CAPS 0L /* Get Capabilities */
#define PDC_PAT_EVENT_SET_MODE 1L /* Set Notification Mode */
#define PDC_PAT_EVENT_SCAN 2L /* Scan Event */
#define PDC_PAT_EVENT_HANDLE 3L /* Handle Event */
#define PDC_PAT_EVENT_GET_NB_CALL 4L /* Get Non-Blocking call Args*/
/* PDC PAT HPMC */
#define PDC_PAT_HPMC 70L /* Cause processor to go into spin
** loop, and wait for wake up from
** Monarch Processor */
#define PDC_PAT_HPMC_RENDEZ_CPU 0L /* go into spin loop */
#define PDC_PAT_HPMC_SET_PARAMS 1L /* Allows OS to specify intr which PDC
* will use to interrupt OS during machine
* check rendezvous */
/* parameters for PDC_PAT_HPMC_SET_PARAMS */
#define HPMC_SET_PARAMS_INTR 1L /* Rendezvous Interrupt */
#define HPMC_SET_PARAMS_WAKE 2L /* Wake up processor */
/* PDC PAT IO */
#define PDC_PAT_IO 71L /* On-line services for I/O modules */
#define PDC_PAT_IO_GET_SLOT_STATUS 5L /* Get Slot Status Info */
#define PDC_PAT_IO_GET_LOC_FROM_HARDWARE 6L /* Get Physical Location from */
/* Hardware Path */
#define PDC_PAT_IO_GET_HARDWARE_FROM_LOC 7L /* Get Hardware Path from
* Physical Location */
#define PDC_PAT_IO_GET_PCI_CONFIG_FROM_HW 11L /* Get PCI Configuration
* Address from Hardware Path */
#define PDC_PAT_IO_GET_HW_FROM_PCI_CONFIG 12L /* Get Hardware Path
* from PCI Configuration Address */
#define PDC_PAT_IO_READ_HOST_BRIDGE_INFO 13L /* Read Host Bridge State Info */
#define PDC_PAT_IO_CLEAR_HOST_BRIDGE_INFO 14L /* Clear Host Bridge State Info*/
#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE_SIZE 15L /* Get PCI INT Routing Table
* Size */
#define PDC_PAT_IO_GET_PCI_ROUTING_TABLE 16L /* Get PCI INT Routing Table */
#define PDC_PAT_IO_GET_HINT_TABLE_SIZE 17L /* Get Hint Table Size */
#define PDC_PAT_IO_GET_HINT_TABLE 18L /* Get Hint Table */
#define PDC_PAT_IO_PCI_CONFIG_READ 19L /* PCI Config Read */
#define PDC_PAT_IO_PCI_CONFIG_WRITE 20L /* PCI Config Write */
#define PDC_PAT_IO_GET_NUM_IO_SLOTS 21L /* Get Number of I/O Bay Slots in
* Cabinet */
#define PDC_PAT_IO_GET_LOC_IO_SLOTS 22L /* Get Physical Location of I/O */
/* Bay Slots in Cabinet */
#define PDC_PAT_IO_BAY_STATUS_INFO 28L /* Get I/O Bay Slot Status Info */
#define PDC_PAT_IO_GET_PROC_VIEW 29L /* Get Processor view of IO address */
#define PDC_PAT_IO_PROG_SBA_DIR_RANGE 30L /* Program directed range */
/* PDC PAT MEM */
#define PDC_PAT_MEM 72L /* Manage memory page deallocation */
#define PDC_PAT_MEM_PD_INFO 0L /* Return PDT info for PD */
#define PDC_PAT_MEM_PD_CLEAR 1L /* Clear PDT for PD */
#define PDC_PAT_MEM_PD_READ 2L /* Read PDT entries for PD */
#define PDC_PAT_MEM_PD_RESET 3L /* Reset clear bit for PD */
#define PDC_PAT_MEM_CELL_INFO 5L /* Return PDT info For Cell */
#define PDC_PAT_MEM_CELL_CLEAR 6L /* Clear PDT For Cell */
#define PDC_PAT_MEM_CELL_READ 7L /* Read PDT entries For Cell */
#define PDC_PAT_MEM_CELL_RESET 8L /* Reset clear bit For Cell */
#define PDC_PAT_MEM_SETGM 9L /* Set Golden Memory value */
#define PDC_PAT_MEM_ADD_PAGE 10L /* ADDs a page to the cell */
#define PDC_PAT_MEM_ADDRESS 11L /* Get Physical Location From*/
/* Memory Address */
#define PDC_PAT_MEM_GET_TXT_SIZE 12L /* Get Formatted Text Size */
#define PDC_PAT_MEM_GET_PD_TXT 13L /* Get PD Formatted Text */
#define PDC_PAT_MEM_GET_CELL_TXT 14L /* Get Cell Formatted Text */
#define PDC_PAT_MEM_RD_STATE_INFO 15L /* Read Mem Module State Info*/
#define PDC_PAT_MEM_CLR_STATE_INFO 16L /*Clear Mem Module State Info*/
#define PDC_PAT_MEM_CLEAN_RANGE 128L /*Clean Mem in specific range*/
#define PDC_PAT_MEM_GET_TBL_SIZE 131L /* Get Memory Table Size */
#define PDC_PAT_MEM_GET_TBL 132L /* Get Memory Table */
/* PDC PAT NVOLATILE */
#define PDC_PAT_NVOLATILE 73L /* Access Non-Volatile Memory*/
#define PDC_PAT_NVOLATILE_READ 0L /* Read Non-Volatile Memory */
#define PDC_PAT_NVOLATILE_WRITE 1L /* Write Non-Volatile Memory */
#define PDC_PAT_NVOLATILE_GET_SIZE 2L /* Return size of NVM */
#define PDC_PAT_NVOLATILE_VERIFY 3L /* Verify contents of NVM */
#define PDC_PAT_NVOLATILE_INIT 4L /* Initialize NVM */
/* PDC PAT PD */
#define PDC_PAT_PD 74L /* Protection Domain Info */
#define PDC_PAT_PD_GET_ADDR_MAP 0L /* Get Address Map */
/* PDC_PAT_PD_GET_ADDR_MAP entry types */
#define PAT_MEMORY_DESCRIPTOR 1
/* PDC_PAT_PD_GET_ADDR_MAP memory types */
#define PAT_MEMTYPE_MEMORY 0
#define PAT_MEMTYPE_FIRMWARE 4
/* PDC_PAT_PD_GET_ADDR_MAP memory usage */
#define PAT_MEMUSE_GENERAL 0
#define PAT_MEMUSE_GI 128
#define PAT_MEMUSE_GNI 129
#endif /* __LP64__ */
#ifndef __ASSEMBLY__
#include <linux/types.h>
extern int pdc_type;
/* Values for pdc_type */
#define PDC_TYPE_ILLEGAL -1
#define PDC_TYPE_PAT 0 /* 64-bit PAT-PDC */
#define PDC_TYPE_SYSTEM_MAP 1 /* 32-bit, but supports PDC_SYSTEM_MAP */
#define PDC_TYPE_SNAKE 2 /* Doesn't support SYSTEM_MAP */
#define is_pdc_pat() (pdc_type == PDC_TYPE_PAT)
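#if 0
/* Illustrative sketch only: inventory code typically keys off pdc_type,
 * using PDC_SYSTEM_MAP enumeration only when the firmware supports it.
 * The helper names below are hypothetical, not part of this header.
 */
static void example_walk_modules(void)
{
	if (is_pdc_pat())
		walk_pat_cells();		/* 64-bit PAT firmware */
	else if (pdc_type == PDC_TYPE_SYSTEM_MAP)
		walk_system_map();		/* PDC_SYSTEM_MAP capable */
	else
		walk_snake_mem_map();		/* fall back to PDC_MEM_MAP */
}
#endif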
struct pdc_chassis_info { /* for PDC_CHASSIS_INFO */
unsigned long actcnt; /* actual number of bytes returned */
unsigned long maxcnt; /* maximum number of bytes that could be returned */
};
struct pdc_coproc_cfg { /* for PDC_COPROC_CFG */
unsigned long ccr_functional;
unsigned long ccr_present;
unsigned long revision;
unsigned long model;
};
struct pdc_model { /* for PDC_MODEL */
unsigned long hversion;
unsigned long sversion;
......@@ -178,33 +500,22 @@ struct pdc_model { /* for PDC_MODEL */
unsigned long arch_rev;
unsigned long pot_key;
unsigned long curr_key;
unsigned long pad[32-9];
} __attribute__((aligned(8))) ;
#if 0
struct pdc_chassis_warn { /* for PDC_CHASSIS */
unsigned long warn;
unsigned long pad[32-1];
} __attribute__((aligned(8))) ;
#endif
};
struct pdc_model_sysmodel { /* for PDC_MODEL_SYSMODEL */
unsigned long mod_len;
unsigned long pad[32-1];
} __attribute__((aligned(8))) ;
/* Values for PDC_MODEL_CAPABILITIES non-equivalent virtual aliasing support */
struct pdc_model_cpuid { /* for PDC_MODEL_CPU_ID */
unsigned long cpuid;
unsigned long pad[32-1];
} __attribute__((aligned(8))) ;
#define PDC_MODEL_IOPDIR_FDC (1 << 2) /* see sba_iommu.c */
#define PDC_MODEL_NVA_MASK (3 << 4)
#define PDC_MODEL_NVA_SUPPORTED (0 << 4)
#define PDC_MODEL_NVA_SLOW (1 << 4)
#define PDC_MODEL_NVA_UNSUPPORTED (3 << 4)
struct pdc_cache_cf { /* for PDC_CACHE (I/D-caches) */
unsigned long
#ifdef __LP64__
cc_padW:32,
#endif
cc_alias:4, /* alias boundaries for virtual addresses */
cc_block: 4, /* to determine most efficient stride */
cc_line : 3, /* maximum amount written back as a result of store (multiple of 16 bytes) */
cc_pad0 : 2, /* reserved */
......@@ -263,14 +574,7 @@ struct pdc_cache_info { /* main-PDC_CACHE-structure (caches & TLB's) */
unsigned long dt_off_stride;
unsigned long dt_off_count;
unsigned long dt_loop;
/* padded to 32 entries... */
unsigned long pad[32-30];
} __attribute__((aligned(8))) ;
struct pdc_hpa { /* PDC_HPA */
unsigned long hpa;
unsigned long filler[31];
} __attribute__((aligned(8))) ;
};
#if 0
/* If you start using the next struct, you'll have to adjust it to
......@@ -287,14 +591,14 @@ struct pdc_iodc { /* PDC_IODC */
unsigned char rev;
unsigned char dep;
unsigned char features;
unsigned char filler1;
unsigned char pad1;
unsigned int checksum:16;
unsigned int length:16;
unsigned int filler[15];
unsigned int pad[15];
} __attribute__((aligned(8))) ;
#endif
#ifndef __LP64__
#ifndef CONFIG_PA20
/* no BTLBs in pa2.0 processors */
struct pdc_btlb_info_range {
__u8 res00;
......@@ -308,53 +612,95 @@ struct pdc_btlb_info { /* PDC_BLOCK_TLB, return of PDC_BTLB_INFO */
unsigned int max_size; /* maximum size of BTLB in pages */
struct pdc_btlb_info_range fixed_range_info;
struct pdc_btlb_info_range variable_range_info;
unsigned int pad[32-4];
} __attribute__((aligned(8))) ;
#endif
};
struct pdc_tlb { /* for PDC_TLB */
unsigned long min_size;
unsigned long max_size;
unsigned long pad[32-2];
} __attribute__((aligned(8))) ;
#endif /* !CONFIG_PA20 */
struct pdc_system_map { /* PDC_SYSTEM_MAP/FIND_MODULE */
void * mod_addr;
#ifdef __LP64__
struct pdc_memory_table_raddr { /* PDC_MEM/PDC_MEM_TABLE (return info) */
unsigned long entries_returned;
unsigned long entries_total;
};
struct pdc_memory_table { /* PDC_MEM/PDC_MEM_TABLE (arguments) */
unsigned long paddr;
unsigned int pages;
unsigned int reserved;
};
#endif /* __LP64__ */
struct pdc_system_map_mod_info { /* PDC_SYSTEM_MAP/FIND_MODULE */
unsigned long mod_addr;
unsigned long mod_pgs;
unsigned long add_addrs;
unsigned long filler[29];
} __attribute__((aligned(8))) ;
};
/*
* Device path specifications used by PDC.
*/
struct pdc_module_path {
struct pdc_system_map_addr_info { /* PDC_SYSTEM_MAP/FIND_ADDRESS */
unsigned long mod_addr;
unsigned long mod_pgs;
};
struct hardware_path {
char flags; /* see bit definitions below */
char bc[6]; /* Bus Converter routing info to a specific */
/* I/O adaptor (< 0 means none, > 63 resvd) */
char mod; /* fixed field of specified module */
unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */
} __attribute__((aligned(8))) ;
};
#ifndef __LP64__
/* Probably needs 64-bit porting -PB */
struct pdc_memory_map { /* PDC_MEMORY_MAP */
unsigned int hpa; /* mod's register set address */
unsigned int more_pgs; /* number of additional I/O pgs */
} __attribute__((aligned(8))) ;
/*
* Device path specifications used by PDC.
*/
struct pdc_module_path {
struct hardware_path path;
unsigned int layers[6]; /* device-specific info (ctlr #, unit # ...) */
};
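#if 0
/* Illustrative sketch only: building a hardware_path by hand.  Unused
 * bus-converter slots are set to -1 ("none", see the bc[] comment
 * above); the module number here is made up for illustration.
 */
static void example_fill_path(struct hardware_path *path)
{
	int i;

	memset(path, 0, sizeof(*path));
	for (i = 0; i < 6; i++)
		path->bc[i] = -1;	/* no bus converters in the route */
	path->mod = 8;			/* module number on the final bus */
}
#endif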
struct pdc_lan_station_id { /* PDC_LAN_STATION_ID */
unsigned char addr[PDC_LAN_STATION_ID_SIZE];
unsigned char pad0[2];
int pad1[30];
#ifndef CONFIG_PA20
/* Only used on some pre-PA2.0 boxes */
struct pdc_memory_map { /* PDC_MEMORY_MAP */
unsigned long hpa; /* mod's register set address */
unsigned long more_pgs; /* number of additional I/O pgs */
};
#endif
struct pdc_tod {
unsigned long tod_sec;
unsigned long tod_usec;
long pad[30];
} __attribute__((aligned(8))) ;
};
#ifdef __LP64__
struct pdc_pat_cell_num {
unsigned long cell_num;
unsigned long cell_loc;
};
struct pdc_pat_cpu_num {
unsigned long cpu_num;
unsigned long cpu_loc;
};
struct pdc_pat_pd_addr_map_entry {
unsigned char entry_type; /* 1 = Memory Descriptor Entry Type */
unsigned char reserve1[5];
unsigned char memory_type;
unsigned char memory_usage;
unsigned long paddr;
unsigned int pages; /* Length in 4K pages */
unsigned int reserve2;
unsigned long cell_map;
};
/* FIXME: mod[508] should really be a union of the various mod components */
struct pdc_pat_cell_mod_maddr_block { /* PDC_PAT_CELL_MODULE */
unsigned long cba; /* function 0 configuration space address */
unsigned long mod_info; /* module information */
unsigned long mod_location; /* physical location of the module */
struct hardware_path mod_path; /* hardware path */
unsigned long mod[508]; /* PAT cell module components */
};
typedef struct pdc_pat_cell_mod_maddr_block pdc_pat_cell_mod_maddr_block_t;
#endif /* __LP64__ */
/* architected results from PDC_PIM/transfer hpmc on a PA1.1 machine */
......@@ -471,10 +817,13 @@ struct pz_device {
/* IODC ENTRY_IO() */
#define ENTRY_IO_BOOTIN 0
#define ENTRY_IO_BOOTOUT 1
#define ENTRY_IO_CIN 2
#define ENTRY_IO_COUT 3
#define ENTRY_IO_CLOSE 4
#define ENTRY_IO_GETMSG 9
#define ENTRY_IO_BBLOCK_IN 16
#define ENTRY_IO_BBLOCK_OUT 17
/* IODC ENTRY_SPA() */
......@@ -490,7 +839,7 @@ struct pz_device {
#ifndef __ASSEMBLY__
#define PAGE0 ((struct zeropage *)0xc0000000)
#define PAGE0 ((struct zeropage *)__PAGE_OFFSET)
struct zeropage {
/* [0x000] initialize vectors (VEC) */
......@@ -557,71 +906,107 @@ struct zeropage {
#define BOOT_CONSOLE_PATH_OFFSET 0x3a8
#ifndef __ASSEMBLY__
void pdc_console_init(void); /* in pdc_console.c */
void pdc_console_restart(void);
struct pdc_pat_io_num {
unsigned long num;
unsigned long reserved[31];
};
extern void pdc_console_init(void);
extern int pdc_getc(void); /* wait for char */
extern void pdc_putc(unsigned char); /* print char */
void setup_pdc(void); /* in inventory.c */
/* wrapper-functions from pdc.c */
int pdc_add_valid(void *address);
int pdc_hpa_processor(void *address);
#if 0
int pdc_hpa_modules(void *address);
#endif
int pdc_iodc_read(void *address, void *hpa, unsigned int index,
int pdc_add_valid(unsigned long address);
int pdc_chassis_info(struct pdc_chassis_info *chassis_info, void *led_info, unsigned long len);
int pdc_chassis_disp(unsigned long disp);
int pdc_coproc_cfg(struct pdc_coproc_cfg *pdc_coproc_info);
int pdc_iodc_read(unsigned long *actcnt, unsigned long hpa, unsigned int index,
void *iodc_data, unsigned int iodc_data_size);
int pdc_system_map_find_mods(void *pdc_mod_info, void *mod_path, int index);
int pdc_system_map_find_mods(struct pdc_system_map_mod_info *pdc_mod_info,
struct pdc_module_path *mod_path, long mod_index);
int pdc_system_map_find_addrs(struct pdc_system_map_addr_info *pdc_addr_info,
long mod_index, long addr_index);
int pdc_model_info(struct pdc_model *model);
int pdc_model_sysmodel(char *name);
int pdc_model_cpuid(struct pdc_model_cpuid *cpu_id);
int pdc_model_versions(struct pdc_model_cpuid *cpu_id, int id);
int pdc_model_sysmodel(char *name);
int pdc_model_cpuid(unsigned long *cpu_id);
int pdc_model_versions(unsigned long *versions, int id);
int pdc_model_capabilities(unsigned long *capabilities);
int pdc_cache_info(struct pdc_cache_info *cache);
#ifndef __LP64__
int pdc_btlb_info( struct pdc_btlb_info *btlb);
int pdc_lan_station_id( char *lan_addr, void *net_hpa);
#endif
int pdc_mem_map_hpa(void *r_addr, void *mod_path);
#ifndef CONFIG_PA20
int pdc_btlb_info(struct pdc_btlb_info *btlb);
int pdc_mem_map_hpa(struct pdc_memory_map *r_addr, struct pdc_module_path *mod_path);
#endif /* !CONFIG_PA20 */
int pdc_lan_station_id(char *lan_addr, unsigned long net_hpa);
extern int pdc_chassis_disp(unsigned long disp);
extern int pdc_chassis_info(void *pdc_result, void *chassis_info, unsigned long len);
#ifdef __LP64__
int pdc_pat_get_irt_size(void *r_addr, unsigned long cell_num);
int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
#else
/* No PAT support for 32-bit kernels...sorry */
#define pdc_pat_get_irt_size(r_addr, cell_num) PDC_RET_NE_PROC
#define pdc_pat_get_irt(r_addr, cell_num) PDC_RET_NE_PROC
#endif
int pdc_pci_irt_size(void *r_addr, void *hpa);
int pdc_pci_irt(void *r_addr, void *hpa, void *tbl);
int pdc_pci_irt_size(unsigned long *num_entries, unsigned long hpa);
int pdc_pci_irt(unsigned long num_entries, unsigned long hpa, void *tbl);
int pdc_get_initiator(struct hardware_path *hwpath, unsigned char *scsi_id, unsigned long *period, char *width, char *mode);
int pdc_tod_read(struct pdc_tod *tod);
int pdc_tod_set(unsigned long sec, unsigned long usec);
/* on all currently-supported platforms, IODC I/O calls are always
* 32-bit calls, and MEM_PDC calls are always the same width as the OS.
* This means Cxxx boxes can't run wide kernels right now. -PB
*
* Note that some PAT boxes may have 64-bit IODC I/O...
*/
#ifdef __LP64__
# define mem_pdc_call(args...) real64_call(0L, ##args)
int pdc_mem_mem_table(struct pdc_memory_table_raddr *r_addr,
		struct pdc_memory_table *tbl, unsigned long entries);
#else
# define mem_pdc_call(args...) real32_call(0L, ##args)
#endif
/* yes 'int', not 'long' -- IODC I/O is always 32-bit stuff */
extern long real64_call(unsigned long function, ...);
extern long real32_call(unsigned long function, ...);
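#if 0
/* Illustrative sketch only: roughly how the wrapper functions above are
 * built on mem_pdc_call().  The real wrappers live in
 * arch/parisc/kernel/pdc.c; the static pdc_result buffer, the locking,
 * and the exact argument conventions shown here are assumptions.
 */
int example_tod_read(struct pdc_tod *tod)
{
	int retval;

	/* arg0 is the PDC procedure, arg1 the option, then its arguments;
	 * PDC wants physical addresses, hence __pa() */
	retval = mem_pdc_call(PDC_TOD, PDC_TOD_READ, __pa(pdc_result), 0);
	memcpy(tod, pdc_result, sizeof(*tod));
	return retval;
}
#endif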
int pdc_do_firm_test_reset(unsigned long ftc_bitmap);
int pdc_do_reset(void);
int pdc_soft_power_info(unsigned long *power_reg);
int pdc_soft_power_button(int sw_control);
void pdc_suspend_usb(void);
int pdc_iodc_getc(void);
void pdc_iodc_putc(unsigned char c);
void pdc_iodc_outc(unsigned char c);
void pdc_emergency_unlock(void);
int pdc_sti_call(unsigned long func, unsigned long flags,
unsigned long inptr, unsigned long outputr,
unsigned long glob_cfg);
#ifdef __LP64__
int pdc_pat_chassis_send_log(unsigned long status, unsigned long data);
int pdc_pat_cell_get_number(struct pdc_pat_cell_num *cell_info);
int pdc_pat_cell_module(unsigned long *actcnt, unsigned long ploc, unsigned long mod,
unsigned long view_type, void *mem_addr);
int pdc_pat_cpu_get_number(struct pdc_pat_cpu_num *cpu_info, void *hpa);
int pdc_pat_get_irt_size(unsigned long *num_entries, unsigned long cell_num);
int pdc_pat_get_irt(void *r_addr, unsigned long cell_num);
int pdc_pat_pd_get_addr_map(unsigned long *actual_len, void *mem_addr,
unsigned long count, unsigned long offset);
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[0] conf_base_addr
* ----------------------------------------------------------
* Bit 0 to 51 - conf_base_addr
* Bit 52 to 62 - reserved
* Bit 63 - endianness bit
********************************************************************/
#define PAT_GET_CBA(value) ((value) & 0xfffffffffffff000UL)
/********************************************************************
* PDC_PAT_CELL[Return Cell Module] memaddr[1] mod_info
* ----------------------------------------------------
* Bit 0 to 7 - entity type
* 0 = central agent, 1 = processor,
* 2 = memory controller, 3 = system bus adapter,
* 4 = local bus adapter, 5 = processor bus converter,
* 6 = crossbar fabric connect, 7 = fabric interconnect,
* 8 to 254 reserved, 255 = unknown.
* Bit 8 to 15 - DVI
* Bit 16 to 23 - IOC functions
* Bit 24 to 39 - reserved
* Bit 40 to 63 - mod_pages
* number of 4K pages a module occupies starting at conf_base_addr
********************************************************************/
#define PAT_GET_ENTITY(value) (((value) >> 56) & 0xffUL)
#define PAT_GET_DVI(value) (((value) >> 48) & 0xffUL)
#define PAT_GET_IOC(value) (((value) >> 40) & 0xffUL)
#define PAT_GET_MOD_PAGES(value) ((value) & 0xffffffUL)
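#if 0
/* Illustrative sketch only: decoding a mod_info word returned by
 * PDC_PAT_CELL_MODULE with the accessors above (the bit numbers in the
 * comment use PA big-endian numbering, bit 0 = MSB).
 */
static void example_decode_mod_info(unsigned long mod_info)
{
	unsigned long entity = PAT_GET_ENTITY(mod_info);	/* e.g. PAT_ENTITY_PROC */
	unsigned long pages  = PAT_GET_MOD_PAGES(mod_info);	/* 4K pages at conf_base_addr */

	(void)entity;
	(void)pages;
}
#endif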
#else /* !__LP64__ */
/* No PAT support for 32-bit kernels...sorry */
#define pdc_pat_get_irt_size(num_entries, cell_num) PDC_BAD_PROC
#define pdc_pat_get_irt(r_addr, cell_num) PDC_BAD_PROC
#endif /* !__LP64__ */
extern void pdc_init(void);
#endif /* __ASSEMBLY__ */
......
/*
* include/asm-parisc/pdc_chassis.h
*
* Copyright (C) 2002 Laurent Canet <canetl@esiee.fr>
* Copyright (C) 2002 Thibaut Varene <varenet@esiee.fr>
*
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; either version 2, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* TODO: - handle processor number on SMP systems (Reporting Entity ID)
* - handle message ID
* - handle timestamps
*/
#ifndef _PARISC_PDC_CHASSIS_H
#define _PARISC_PDC_CHASSIS_H
/*
* ----------
* Prototypes
* ----------
*/
int pdc_chassis_send_status(int message);
void __init parisc_pdc_chassis_init(void);
/*
* -----------------
* Direct call names
* -----------------
* They set up everything for you: the log message and the corresponding LED state
*/
#define PDC_CHASSIS_DIRECT_BSTART 0
#define PDC_CHASSIS_DIRECT_BCOMPLETE 1
#define PDC_CHASSIS_DIRECT_SHUTDOWN 2
#define PDC_CHASSIS_DIRECT_PANIC 3
#define PDC_CHASSIS_DIRECT_HPMC 4
#define PDC_CHASSIS_DIRECT_LPMC 5
#define PDC_CHASSIS_DIRECT_DUMP 6 /* not yet implemented */
#define PDC_CHASSIS_DIRECT_OOPS 7 /* not yet implemented */
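/*
 * Illustrative usage (not part of the original header): callers simply
 * pass one of the direct call names above to pdc_chassis_send_status(),
 * e.g. early in boot:
 *
 *	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BSTART);
 *
 * and on a panic path:
 *
 *	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
 */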
/*
* ------------
* LEDs control
* ------------
* Set the three LEDs -- Run, Attn, and Fault.
*/
/* Old PDC LED control */
#define PDC_CHASSIS_DISP_DATA(v) ((unsigned long)(v) << 17)
/*
* Available PDC PAT LED states
*/
#define PDC_CHASSIS_LED_RUN_OFF (0ULL << 4)
#define PDC_CHASSIS_LED_RUN_FLASH (1ULL << 4)
#define PDC_CHASSIS_LED_RUN_ON (2ULL << 4)
#define PDC_CHASSIS_LED_RUN_NC (3ULL << 4)
#define PDC_CHASSIS_LED_ATTN_OFF (0ULL << 6)
#define PDC_CHASSIS_LED_ATTN_FLASH (1ULL << 6)
#define PDC_CHASSIS_LED_ATTN_NC (3ULL << 6) /* ATTN ON is invalid */
#define PDC_CHASSIS_LED_FAULT_OFF (0ULL << 8)
#define PDC_CHASSIS_LED_FAULT_FLASH (1ULL << 8)
#define PDC_CHASSIS_LED_FAULT_ON (2ULL << 8)
#define PDC_CHASSIS_LED_FAULT_NC (3ULL << 8)
#define PDC_CHASSIS_LED_VALID (1ULL << 10)
/*
* Valid PDC PAT LED states combinations
*/
/* System running normally */
#define PDC_CHASSIS_LSTATE_RUN_NORMAL (PDC_CHASSIS_LED_RUN_ON | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* System crashed and rebooted itself successfully */
#define PDC_CHASSIS_LSTATE_RUN_CRASHREC (PDC_CHASSIS_LED_RUN_ON | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_FLASH | \
PDC_CHASSIS_LED_VALID )
/* There was a system interruption that did not take the system down */
#define PDC_CHASSIS_LSTATE_RUN_SYSINT (PDC_CHASSIS_LED_RUN_ON | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* System running and unexpected reboot or non-critical error detected */
#define PDC_CHASSIS_LSTATE_RUN_NCRIT (PDC_CHASSIS_LED_RUN_ON | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_FLASH | \
PDC_CHASSIS_LED_VALID )
/* Executing non-OS code */
#define PDC_CHASSIS_LSTATE_NONOS (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* Boot failed - Executing non-OS code */
#define PDC_CHASSIS_LSTATE_NONOS_BFAIL (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_ON | \
PDC_CHASSIS_LED_VALID )
/* Unexpected reboot occurred - Executing non-OS code */
#define PDC_CHASSIS_LSTATE_NONOS_UNEXP (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_FLASH | \
PDC_CHASSIS_LED_VALID )
/* Executing non-OS code - Non-critical error detected */
#define PDC_CHASSIS_LSTATE_NONOS_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* Boot failed - Executing non-OS code - Non-critical error detected */
#define PDC_CHASSIS_LSTATE_BFAIL_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_ON | \
PDC_CHASSIS_LED_VALID )
/* Unexpected reboot/recovering - Executing non-OS code - Non-critical error detected */
#define PDC_CHASSIS_LSTATE_UNEXP_NCRIT (PDC_CHASSIS_LED_RUN_FLASH | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_FLASH | \
PDC_CHASSIS_LED_VALID )
/* Cannot execute PDC */
#define PDC_CHASSIS_LSTATE_CANNOT_PDC (PDC_CHASSIS_LED_RUN_OFF | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* Boot failed - OS not up - PDC has detected a failure that prevents boot */
#define PDC_CHASSIS_LSTATE_FATAL_BFAIL (PDC_CHASSIS_LED_RUN_OFF | \
PDC_CHASSIS_LED_ATTN_OFF | \
PDC_CHASSIS_LED_FAULT_ON | \
PDC_CHASSIS_LED_VALID )
/* No code running - Non-critical error detected (double fault situation) */
#define PDC_CHASSIS_LSTATE_NOCODE_NCRIT (PDC_CHASSIS_LED_RUN_OFF | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_OFF | \
PDC_CHASSIS_LED_VALID )
/* Boot failed - OS not up - Fatal failure detected - Non-critical error detected */
#define PDC_CHASSIS_LSTATE_FATAL_NCRIT (PDC_CHASSIS_LED_RUN_OFF | \
PDC_CHASSIS_LED_ATTN_FLASH | \
PDC_CHASSIS_LED_FAULT_ON | \
PDC_CHASSIS_LED_VALID )
/* All other states are invalid */
/*
* --------------
* PDC Log events
* --------------
* Here follows bits needed to fill up the log event sent to PDC_CHASSIS
* The log message contains: Alert level, Source, Source detail,
* Source ID, Problem detail, Caller activity, Activity status,
* Caller subactivity, Reporting entity type, Reporting entity ID,
* Data type, Unique message ID and EOM.
*/
/* Alert level */
#define PDC_CHASSIS_ALERT_FORWARD (0ULL << 36) /* no failure detected */
#define PDC_CHASSIS_ALERT_SERPROC (1ULL << 36) /* service proc - no failure */
#define PDC_CHASSIS_ALERT_NURGENT (2ULL << 36) /* non-urgent operator attn */
#define PDC_CHASSIS_ALERT_BLOCKED (3ULL << 36) /* system blocked */
#define PDC_CHASSIS_ALERT_CONF_CHG (4ULL << 36) /* unexpected configuration change */
#define PDC_CHASSIS_ALERT_ENV_PB (5ULL << 36) /* boot possible, environmental pb */
#define PDC_CHASSIS_ALERT_PENDING (6ULL << 36) /* boot possible, pending failure */
#define PDC_CHASSIS_ALERT_PERF_IMP (8ULL << 36) /* boot possible, performance impaired */
#define PDC_CHASSIS_ALERT_FUNC_IMP (10ULL << 36) /* boot possible, functionality impaired */
#define PDC_CHASSIS_ALERT_SOFT_FAIL (12ULL << 36) /* software failure */
#define PDC_CHASSIS_ALERT_HANG (13ULL << 36) /* system hang */
#define PDC_CHASSIS_ALERT_ENV_FATAL (14ULL << 36) /* fatal power or environmental pb */
#define PDC_CHASSIS_ALERT_HW_FATAL (15ULL << 36) /* fatal hardware problem */
/* Source */
#define PDC_CHASSIS_SRC_NONE (0ULL << 28) /* unknown, no source stated */
#define PDC_CHASSIS_SRC_PROC (1ULL << 28) /* processor */
/* For later use? */
#define PDC_CHASSIS_SRC_PROC_CACHE (2ULL << 28) /* processor cache*/
#define PDC_CHASSIS_SRC_PDH (3ULL << 28) /* processor dependent hardware */
#define PDC_CHASSIS_SRC_PWR (4ULL << 28) /* power */
#define PDC_CHASSIS_SRC_FAB (5ULL << 28) /* fabric connector */
#define PDC_CHASSIS_SRC_PLATi (6ULL << 28) /* platform */
#define PDC_CHASSIS_SRC_MEM (7ULL << 28) /* memory */
#define PDC_CHASSIS_SRC_IO (8ULL << 28) /* I/O */
#define PDC_CHASSIS_SRC_CELL (9ULL << 28) /* cell */
#define PDC_CHASSIS_SRC_PD (10ULL << 28) /* protected domain */
/* Source detail field */
#define PDC_CHASSIS_SRC_D_PROC (1ULL << 24) /* processor general */
/* Source ID - platform dependent */
#define PDC_CHASSIS_SRC_ID_UNSPEC (0ULL << 16)
/* Problem detail - problem source dependent */
#define PDC_CHASSIS_PB_D_PROC_NONE (0ULL << 32) /* no problem detail */
#define PDC_CHASSIS_PB_D_PROC_TIMEOUT (4ULL << 32) /* timeout */
/* Caller activity */
#define PDC_CHASSIS_CALL_ACT_HPUX_BL (7ULL << 12) /* Boot Loader */
#define PDC_CHASSIS_CALL_ACT_HPUX_PD (8ULL << 12) /* SAL_PD activities */
#define PDC_CHASSIS_CALL_ACT_HPUX_EVENT (9ULL << 12) /* SAL_EVENTS activities */
#define PDC_CHASSIS_CALL_ACT_HPUX_IO (10ULL << 12) /* SAL_IO activities */
#define PDC_CHASSIS_CALL_ACT_HPUX_PANIC (11ULL << 12) /* System panic */
#define PDC_CHASSIS_CALL_ACT_HPUX_INIT (12ULL << 12) /* System initialization */
#define PDC_CHASSIS_CALL_ACT_HPUX_SHUT (13ULL << 12) /* System shutdown */
#define PDC_CHASSIS_CALL_ACT_HPUX_WARN (14ULL << 12) /* System warning */
#define PDC_CHASSIS_CALL_ACT_HPUX_DU (15ULL << 12) /* Display_Activity() update */
/* Activity status - implementation dependent */
#define PDC_CHASSIS_ACT_STATUS_UNSPEC (0ULL << 0)
/* Caller subactivity - implementation dependent */
/* FIXME: other subactivities ? */
#define PDC_CHASSIS_CALL_SACT_UNSPEC (0ULL << 4) /* implementation dependent */
/* Reporting entity type */
#define PDC_CHASSIS_RET_GENERICOS (12ULL << 52) /* generic OSes */
#define PDC_CHASSIS_RET_IA64_NT (13ULL << 52) /* IA-64 NT */
#define PDC_CHASSIS_RET_HPUX (14ULL << 52) /* HP-UX */
#define PDC_CHASSIS_RET_DIAG (15ULL << 52) /* offline diagnostics & utilities */
/* Reporting entity ID */
#define PDC_CHASSIS_REID_UNSPEC (0ULL << 44)
/* Data type */
#define PDC_CHASSIS_DT_NONE (0ULL << 59) /* data field unused */
/* For later use? Do we need these? */
#define PDC_CHASSIS_DT_PHYS_ADDR (1ULL << 59) /* physical address */
#define PDC_CHASSIS_DT_DATA_EXPECT (2ULL << 59) /* expected data */
#define PDC_CHASSIS_DT_ACTUAL (3ULL << 59) /* actual data */
#define PDC_CHASSIS_DT_PHYS_LOC (4ULL << 59) /* physical location */
#define PDC_CHASSIS_DT_PHYS_LOC_EXT (5ULL << 59) /* physical location extension */
#define PDC_CHASSIS_DT_TAG (6ULL << 59) /* tag */
#define PDC_CHASSIS_DT_SYNDROME (7ULL << 59) /* syndrome */
#define PDC_CHASSIS_DT_CODE_ADDR (8ULL << 59) /* code address */
#define PDC_CHASSIS_DT_ASCII_MSG (9ULL << 59) /* ascii message */
#define PDC_CHASSIS_DT_POST (10ULL << 59) /* POST code */
#define PDC_CHASSIS_DT_TIMESTAMP (11ULL << 59) /* timestamp */
#define PDC_CHASSIS_DT_DEV_STAT (12ULL << 59) /* device status */
#define PDC_CHASSIS_DT_DEV_TYPE (13ULL << 59) /* device type */
#define PDC_CHASSIS_DT_PB_DET (14ULL << 59) /* problem detail */
#define PDC_CHASSIS_DT_ACT_LEV (15ULL << 59) /* activity level/timeout */
#define PDC_CHASSIS_DT_SER_NUM (16ULL << 59) /* serial number */
#define PDC_CHASSIS_DT_REV_NUM (17ULL << 59) /* revision number */
#define PDC_CHASSIS_DT_INTERRUPT (18ULL << 59) /* interruption information */
#define PDC_CHASSIS_DT_TEST_NUM (19ULL << 59) /* test number */
#define PDC_CHASSIS_DT_STATE_CHG (20ULL << 59) /* major changes in system state */
#define PDC_CHASSIS_DT_PROC_DEALLOC (21ULL << 59) /* processor deallocate */
#define PDC_CHASSIS_DT_RESET (30ULL << 59) /* reset type and cause */
#define PDC_CHASSIS_DT_PA_LEGACY (31ULL << 59) /* legacy PA hex chassis code */
/* System states - part of major changes in system state data field */
#define PDC_CHASSIS_SYSTATE_BSTART (0ULL << 0) /* boot start */
#define PDC_CHASSIS_SYSTATE_BCOMP (1ULL << 0) /* boot complete */
#define PDC_CHASSIS_SYSTATE_CHANGE (2ULL << 0) /* major change */
#define PDC_CHASSIS_SYSTATE_LED (3ULL << 0) /* LED change */
#define PDC_CHASSIS_SYSTATE_PANIC (9ULL << 0) /* OS Panic */
#define PDC_CHASSIS_SYSTATE_DUMP (10ULL << 0) /* memory dump */
#define PDC_CHASSIS_SYSTATE_HPMC (11ULL << 0) /* processing HPMC */
#define PDC_CHASSIS_SYSTATE_HALT (15ULL << 0) /* system halted */
/* Message ID */
#define PDC_CHASSIS_MSG_ID (0ULL << 40) /* we do not handle msg IDs atm */
/* EOM - separates log entries */
#define PDC_CHASSIS_EOM_CLEAR (0ULL << 43)
#define PDC_CHASSIS_EOM_SET (1ULL << 43)
/*
* Preformatted well-known messages
*/
/* Boot started */
#define PDC_CHASSIS_PMSG_BSTART (PDC_CHASSIS_ALERT_SERPROC | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_INIT | \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_BSTART | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
/* Boot complete */
#define PDC_CHASSIS_PMSG_BCOMPLETE (PDC_CHASSIS_ALERT_SERPROC | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_INIT | \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_BCOMP | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
/* Shutdown */
#define PDC_CHASSIS_PMSG_SHUTDOWN (PDC_CHASSIS_ALERT_SERPROC | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_SHUT | \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_HALT | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
/* Panic */
#define PDC_CHASSIS_PMSG_PANIC (PDC_CHASSIS_ALERT_SOFT_FAIL | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_PANIC| \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_PANIC | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
// FIXME: extrapolated data
/* HPMC */
#define PDC_CHASSIS_PMSG_HPMC (PDC_CHASSIS_ALERT_CONF_CHG /*?*/ | \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_WARN | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_HPMC | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
/* LPMC */
#define PDC_CHASSIS_PMSG_LPMC (PDC_CHASSIS_ALERT_BLOCKED /*?*/| \
PDC_CHASSIS_SRC_PROC | \
PDC_CHASSIS_SRC_D_PROC | \
PDC_CHASSIS_SRC_ID_UNSPEC | \
PDC_CHASSIS_PB_D_PROC_NONE | \
PDC_CHASSIS_CALL_ACT_HPUX_WARN | \
PDC_CHASSIS_ACT_STATUS_UNSPEC | \
PDC_CHASSIS_CALL_SACT_UNSPEC | \
PDC_CHASSIS_RET_HPUX | \
PDC_CHASSIS_REID_UNSPEC | \
PDC_CHASSIS_DT_STATE_CHG | \
PDC_CHASSIS_SYSTATE_CHANGE | \
PDC_CHASSIS_MSG_ID | \
PDC_CHASSIS_EOM_SET )
#endif /* _PARISC_PDC_CHASSIS_H */
/* vim: set ts=8 */
#ifndef _PARISC_PERCPU_H
#define _PARISC_PERCPU_H
#include <asm-generic/percpu.h>
#endif
#ifndef _ASM_PERF_H_
#define _ASM_PERF_H_
/* ioctls */
#define PA_PERF_ON _IO('p', 1)
#define PA_PERF_OFF _IOR('p', 2, unsigned int)
#define PA_PERF_VERSION _IOR('p', 3, int)
#define PA_PERF_DEV "perf"
#define PA_PERF_MINOR 146
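#if 0
/* Illustrative user-space sketch only: driving the perf driver through
 * the ioctls above.  Assumes a character device node (e.g. /dev/perf)
 * created with the minor number above; the node name and the read-back
 * convention are assumptions, not part of this header.
 */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>

int main(void)
{
	unsigned int counter;
	int fd = open("/dev/perf", O_RDWR);

	if (fd < 0)
		return 1;
	ioctl(fd, PA_PERF_ON);		/* start the counters */
	/* ... run the workload to be measured ... */
	ioctl(fd, PA_PERF_OFF, &counter);	/* stop and read back */
	printf("counter: %08x\n", counter);
	return 0;
}
#endif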
/* Interface types */
#define UNKNOWN_INTF 255
#define ONYX_INTF 0
#define CUDA_INTF 1
/* Common Onyx and Cuda images */
#define CPI 0
#define BUSUTIL 1
#define TLBMISS 2
#define TLBHANDMISS 3
#define PTKN 4
#define PNTKN 5
#define IMISS 6
#define DMISS 7
#define DMISS_ACCESS 8
#define BIG_CPI 9
#define BIG_LS 10
#define BR_ABORT 11
#define ISNT 12
#define QUADRANT 13
#define RW_PDFET 14
#define RW_WDFET 15
#define SHLIB_CPI 16
/* Cuda only Images */
#define FLOPS 17
#define CACHEMISS 18
#define BRANCHES 19
#define CRSTACK 20
#define I_CACHE_SPEC 21
#define MAX_CUDA_IMAGES 22
/* Onyx only Images */
#define ADDR_INV_ABORT_ALU 17
#define BRAD_STALL 18
#define CNTL_IN_PIPEL 19
#define DSNT_XFH 20
#define FET_SIG1 21
#define FET_SIG2 22
#define G7_1 23
#define G7_2 24
#define G7_3 25
#define G7_4 26
#define MPB_LABORT 27
#define PANIC 28
#define RARE_INST 29
#define RW_DFET 30
#define RW_IFET 31
#define RW_SDFET 32
#define SPEC_IFET 33
#define ST_COND0 34
#define ST_COND1 35
#define ST_COND2 36
#define ST_COND3 37
#define ST_COND4 38
#define ST_UNPRED0 39
#define ST_UNPRED1 40
#define UNPRED 41
#define GO_STORE 42
#define SHLIB_CALL 43
#define MAX_ONYX_IMAGES 44
#endif
#ifndef _ASM_PGALLOC_H
#define _ASM_PGALLOC_H
/* The usual comment is "Caches aren't brain-dead on the <architecture>".
* Unfortunately, that doesn't apply to PA-RISC. */
#include <linux/config.h>
#include <linux/gfp.h>
#include <linux/mm.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/cache.h>
/* Internal use D/I cache flushing routines... */
/* XXX: these functions must not access memory between f[di]ce instructions. */
static inline void __flush_dcache_range(unsigned long start, unsigned long size)
{
#if 0
register unsigned long count = (size / L1_CACHE_BYTES);
register unsigned long loop = cache_info.dc_loop;
register unsigned long i, j;
if (size > 64 * 1024) {
/* Just punt and clear the whole damn thing */
flush_data_cache();
return;
}
for(i = 0; i <= count; i++, start += L1_CACHE_BYTES)
for(j = 0; j < loop; j++)
fdce(start);
#else
flush_data_cache();
#endif
}
static inline void __flush_icache_range(unsigned long start, unsigned long size)
{
#if 0
register unsigned long count = (size / L1_CACHE_BYTES);
register unsigned long loop = cache_info.ic_loop;
register unsigned long i, j;
if (size > 64 * 1024) {
/* Just punt and clear the whole damn thing */
flush_instruction_cache();
return;
}
for(i = 0; i <= count; i++, start += L1_CACHE_BYTES)
for(j = 0; j < loop; j++)
fice(start);
#else
flush_instruction_cache();
#endif
}
static inline void
flush_kernel_dcache_range(unsigned long start, unsigned long size)
{
register unsigned long end = start + size;
register unsigned long i;
start &= ~(L1_CACHE_BYTES - 1);
for (i = start; i < end; i += L1_CACHE_BYTES) {
kernel_fdc(i);
}
asm volatile("sync" : : );
asm volatile("syncdma" : : );
}
extern void __flush_page_to_ram(unsigned long address);
#define flush_cache_all() flush_all_caches()
#define flush_cache_mm(foo) flush_all_caches()
#if 0
/* This is how I think the cache flushing should be done -- mrw */
extern inline void flush_cache_mm(struct mm_struct *mm) {
if (mm == current->mm) {
flush_user_dcache_range(mm->start_data, mm->end_data);
flush_user_icache_range(mm->start_code, mm->end_code);
} else {
flush_other_dcache_range(mm->context, mm->start_data, mm->end_data);
flush_other_icache_range(mm->context, mm->start_code, mm->end_code);
}
}
#endif
#define flush_cache_range(vma, start, end) do { \
__flush_dcache_range(start, (unsigned long)end - (unsigned long)start); \
__flush_icache_range(start, (unsigned long)end - (unsigned long)start); \
} while(0)
#define flush_cache_page(vma, vmaddr) do { \
__flush_dcache_range(vmaddr, PAGE_SIZE); \
__flush_icache_range(vmaddr, PAGE_SIZE); \
} while(0)
#define flush_page_to_ram(page) \
__flush_page_to_ram((unsigned long)page_address(page))
#define flush_icache_range(start, end) \
__flush_icache_range(start, end - start)
#define flush_icache_user_range(vma, page, addr, len) \
flush_icache_page((vma), (page))
#define flush_icache_page(vma, page) \
__flush_icache_range(page_address(page), PAGE_SIZE)
#define flush_dcache_page(page) \
__flush_dcache_range(page_address(page), PAGE_SIZE)
/* TLB flushing routines.... */
extern void flush_data_tlb(void);
extern void flush_instruction_tlb(void);
#define flush_tlb() do { \
flush_data_tlb(); \
flush_instruction_tlb(); \
} while(0)
#define flush_tlb_all() flush_tlb() /* XXX p[id]tlb */
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}
static inline void flush_instruction_tlb_range(unsigned long start,
unsigned long size)
{
#if 0
register unsigned long count = (size / PAGE_SIZE);
register unsigned long loop = cache_info.it_loop;
register unsigned long i, j;
for(i = 0; i <= count; i++, start += PAGE_SIZE)
for(j = 0; j < loop; j++)
pitlbe(start);
#else
flush_instruction_tlb();
#endif
}
static inline void flush_data_tlb_range(unsigned long start,
unsigned long size)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
#if 0
register unsigned long count = (size / PAGE_SIZE);
register unsigned long loop = cache_info.dt_loop;
register unsigned long i, j;
for(i = 0; i <= count; i++, start += PAGE_SIZE)
for(j = 0; j < loop; j++)
pdtlbe(start);
#else
flush_data_tlb();
#endif
}
static inline void __flush_tlb_range(unsigned long space, unsigned long start,
unsigned long size)
{
unsigned long old_sr1;
if(!size)
return;
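/* temporarily point sr1 at the target space so the purges below hit
 * that address space, then restore the caller's sr1 afterwards */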
old_sr1 = mfsp(1);
mtsp(space, 1);
flush_data_tlb_range(start, size);
flush_instruction_tlb_range(start, size);
mtsp(old_sr1, 1);
}
extern void __flush_tlb_space(unsigned long space);
static inline void flush_tlb_mm(struct mm_struct *mm)
{
#if 0
__flush_tlb_space(mm->context);
#else
flush_tlb();
#endif
pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
if (likely(pgd != NULL))
clear_page(pgd);
return pgd;
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
__flush_tlb_range(vma->vm_mm->context, addr, PAGE_SIZE);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
__flush_tlb_range(vma->vm_mm->context, start, end - start);
}
/*
* NOTE: Many of the below macros use PT_NLEVELS because
* it is convenient that PT_NLEVELS == LOG2(pte size in bytes),
* i.e. we use 3 level page tables when we use 8 byte pte's
* (for 64 bit) and 2 level page tables when we use 4 byte pte's
*/
#ifdef __LP64__
#define PT_NLEVELS 3
#define PT_INITIAL 4 /* Number of initial page tables */
#else
#define PT_NLEVELS 2
#define PT_INITIAL 2 /* Number of initial page tables */
#endif
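/* Worked example (assuming 4k pages, i.e. PAGE_SHIFT == 12):
 * 64-bit: PT_NLEVELS == 3, so each level decodes 12 - 3 == 9 bits;
 * PGDIR_SHIFT == 12 + 2*9 == 30, PMD_SHIFT == 21, and each of
 * PTRS_PER_PGD/PMD/PTE is 1 << 9 == 512 (512 8-byte entries per 4k page).
 * 32-bit: PT_NLEVELS == 2 gives 10 bits per level, so PGDIR_SHIFT == 22
 * and a page holds 1024 4-byte entries.
 */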
/* Definitions for 1st level */
#define PGDIR_SHIFT (PAGE_SHIFT + (PT_NLEVELS - 1)*(PAGE_SHIFT - PT_NLEVELS))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT - PT_NLEVELS))
#define USER_PTRS_PER_PGD (TASK_SIZE/PGDIR_SIZE)
/* Definitions for 2nd level */
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PT_NLEVELS))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#if PT_NLEVELS == 3
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT - PT_NLEVELS))
#else
#define PTRS_PER_PMD 1
#endif
/* Definitions for 3rd level */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - PT_NLEVELS))
#define get_pgd_fast get_pgd_slow
#define free_pgd_fast free_pgd_slow
extern __inline__ pgd_t *get_pgd_slow(void)
{
extern unsigned long gateway_pgd_offset;
extern unsigned long gateway_pgd_entry;
pgd_t *ret = (pgd_t *)__get_free_page(GFP_KERNEL);
if (ret) {
memset (ret, 0, PTRS_PER_PGD * sizeof(pgd_t));
/* Install HP-UX and Linux gateway page translations */
pgd_val(*(ret + gateway_pgd_offset)) = gateway_pgd_entry;
}
return ret;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
static inline void pgd_free(pgd_t *pgd)
{
free_page((unsigned long)pgd);
}
#if PT_NLEVELS == 3
#ifdef __LP64__
/* Three Level Page Table Support for pmd's */
extern __inline__ pmd_t *get_pmd_fast(void)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
return NULL; /* la la */
pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)pmd);
}
#if 0
extern __inline__ void free_pmd_fast(pmd_t *pmd)
{
}
#else
#define free_pmd_fast free_pmd_slow
#endif
extern __inline__ pmd_t *get_pmd_slow(void)
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd = (pmd_t *) __get_free_page(GFP_KERNEL);
if (pmd)
clear_page(pmd);
return pmd;
}
extern __inline__ void free_pmd_slow(pmd_t *pmd)
static inline void pmd_free(pmd_t *pmd)
{
free_page((unsigned long)pmd);
}
extern void __bad_pgd(pgd_t *pgd);
extern inline pmd_t * pmd_alloc(pgd_t *pgd, unsigned long address)
{
address = (address >> PMD_SHIFT) & (PTRS_PER_PMD - 1);
if (pgd_none(*pgd))
goto getnew;
if (pgd_bad(*pgd))
goto fix;
return (pmd_t *) pgd_page(*pgd) + address;
getnew:
{
pmd_t *page = get_pmd_fast();
if (!page)
page = get_pmd_slow();
if (page) {
if (pgd_none(*pgd)) {
pgd_val(*pgd) = _PAGE_TABLE + __pa((unsigned long)page);
return page + address;
}
else
free_pmd_fast(page);
}
else {
return NULL;
}
}
fix:
__bad_pgd(pgd);
return NULL;
}
#else
/* Two Level Page Table Support for pmd's */
extern inline pmd_t * pmd_alloc(pgd_t * pgd, unsigned long address)
{
return (pmd_t *) pgd;
}
/*
* allocating and freeing a pmd is trivial: the 1-entry pmd is
* inside the pgd, so has no extra memory associated with it.
*/
extern inline void free_pmd_fast(pmd_t * pmd)
{
}
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
#endif
extern __inline__ pte_t *get_pte_fast(void)
{
return NULL; /* la la */
}
#if 0
extern __inline__ void free_pte_fast(pte_t *pte)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
pmd_val(*pmd) = _PAGE_TABLE + __pa((unsigned long)pte);
}
#else
#define free_pte_fast free_pte_slow
#endif
extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
#define pmd_populate(mm, pmd, pte_page) \
pmd_populate_kernel(mm, pmd, page_address(pte_page))
extern __inline__ void free_pte_slow(pte_t *pte)
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
free_page((unsigned long)pte);
struct page *page = alloc_page(GFP_KERNEL);
if (likely(page != NULL))
clear_page(page_address(page));
return page;
}
#define pmd_alloc_kernel pmd_alloc
#define pte_alloc_kernel pte_alloc
#define pte_free(pte) free_pte_fast(pte)
#define pmd_free(pmd) free_pmd_fast(pmd)
#define pgd_free(pgd) free_pgd_fast(pgd)
#define pgd_alloc(mm) get_pgd_fast()
extern void __bad_pmd(pmd_t *pmd);
extern inline pte_t * pte_alloc(pmd_t * pmd, unsigned long address)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
{
address = (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
pte_t *pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (likely(pte != NULL))
clear_page(pte);
return pte;
}
if (pmd_none(*pmd))
goto getnew;
if (pmd_bad(*pmd))
goto fix;
return (pte_t *) pmd_page(*pmd) + address;
getnew:
static inline void pte_free_kernel(pte_t *pte)
{
pte_t *page = get_pte_fast();
if (!page)
return get_pte_slow(pmd, address);
pmd_val(*pmd) = _PAGE_TABLE + __pa((unsigned long)page);
return page + address;
}
fix:
__bad_pmd(pmd);
return NULL;
free_page((unsigned long)pte);
}
#define pte_free(page) pte_free_kernel(page_address(page))
extern int do_check_pgt_cache(int, int);
#define check_pgt_cache() do { } while (0)
#endif
#ifndef _PARISC_PGTABLE_H
#define _PARISC_PGTABLE_H
#include <asm/fixmap.h>
#ifndef __ASSEMBLY__
/*
* we simulate an x86-style page table for the linux mm code
*/
#include <linux/spinlock.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/bitops.h>
/* To make 53c7xx.c happy */
#define IOMAP_FULL_CACHING 2 /* used for 'what' below */
#define IOMAP_NOCACHE_SER 3
extern void kernel_set_cachemode(unsigned long addr,
unsigned long size, int what);
/*
* cache_clear() semantics: Clear any cache entries for the area in question,
* without writing back dirty entries first. This is useful if the data will
* be overwritten anyway, e.g. by DMA to memory. The range is defined by a
* _physical_ address.
*/
#define cache_clear(paddr, len) do { } while (0)
/*
* cache_push() semantics: Write back any dirty cache data in the given area,
* and invalidate the range in the instruction cache. It needs not (but may)
* invalidate those entries also in the data cache. The range is defined by a
* _physical_ address.
*/
#define cache_push(paddr, len) \
do { \
unsigned long vaddr = phys_to_virt(paddr); \
flush_cache_range(0, vaddr, vaddr + len); \
} while(0)
#define cache_push_v(vaddr, len) \
flush_cache_range(0, vaddr, vaddr + len)
#define ARCH_STACK_GROWSUP
/*
* kern_addr_valid(ADDR) tests if ADDR is pointing to valid kernel
......@@ -63,8 +39,6 @@ extern void kernel_set_cachemode(unsigned long addr,
*(pteptr) = (pteval); \
} while(0)
#endif /* !__ASSEMBLY__ */
#define pte_ERROR(e) \
......@@ -74,11 +48,62 @@ extern void kernel_set_cachemode(unsigned long addr,
#define pgd_ERROR(e) \
printk("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e))
/* Note: If you change ISTACK_SIZE, you need to change the corresponding
* values in vmlinux.lds and vmlinux64.lds (init_istack section). Also,
* the "order" and size need to agree.
*/
#define ISTACK_SIZE 32768 /* Interrupt Stack Size */
#define ISTACK_ORDER 3
/*
* NOTE: Many of the below macros use PT_NLEVELS because
* it is convenient that PT_NLEVELS == LOG2(pte size in bytes),
* i.e. we use 3 level page tables when we use 8 byte pte's
* (for 64 bit) and 2 level page tables when we use 4 byte pte's
*/
#ifdef __LP64__
#define PT_NLEVELS 3
#define PT_INITIAL 4 /* Number of initial page tables */
#else
#define PT_NLEVELS 2
#define PT_INITIAL 2 /* Number of initial page tables */
#endif
#define MAX_ADDRBITS (PAGE_SHIFT + (PT_NLEVELS)*(PAGE_SHIFT - PT_NLEVELS))
#define MAX_ADDRESS (1UL << MAX_ADDRBITS)
#define SPACEID_SHIFT (MAX_ADDRBITS - 32)
/* Definitions for 1st level */
#define PGDIR_SHIFT (PAGE_SHIFT + (PT_NLEVELS - 1)*(PAGE_SHIFT - PT_NLEVELS))
#define PGDIR_SIZE (1UL << PGDIR_SHIFT)
#define PGDIR_MASK (~(PGDIR_SIZE-1))
#define PTRS_PER_PGD (1UL << (PAGE_SHIFT - PT_NLEVELS))
#define USER_PTRS_PER_PGD PTRS_PER_PGD
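/* Worked example for the macros above (editor's sketch, assuming the usual
 * 4kB page size, i.e. PAGE_SHIFT == 12, which this hunk does not spell out):
 *   narrow (PT_NLEVELS == 2): each level decodes PAGE_SHIFT - PT_NLEVELS
 *     == 10 bits, so MAX_ADDRBITS == 12 + 2*10 == 32, PGDIR_SHIFT == 22
 *     and PTRS_PER_PGD == 1024;
 *   wide (PT_NLEVELS == 3): each level decodes 9 bits, so MAX_ADDRBITS
 *     == 12 + 3*9 == 39, PGDIR_SHIFT == 30 and PTRS_PER_PGD == 512.
 * This is the "PT_NLEVELS == LOG2(pte size in bytes)" trick from the NOTE
 * above doing double duty in the formulas.
 */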
/* Definitions for 2nd level */
#define pgtable_cache_init() do { } while (0)
#define PMD_SHIFT (PAGE_SHIFT + (PAGE_SHIFT - PT_NLEVELS))
#define PMD_SIZE (1UL << PMD_SHIFT)
#define PMD_MASK (~(PMD_SIZE-1))
#if PT_NLEVELS == 3
#define PTRS_PER_PMD (1UL << (PAGE_SHIFT - PT_NLEVELS))
#else
#define PTRS_PER_PMD 1
#endif
/* Definitions for 3rd level */
#define PTRS_PER_PTE (1UL << (PAGE_SHIFT - PT_NLEVELS))
/*
* pgd entries used up by user/kernel:
*/
#define USER_PGD_PTRS (PAGE_OFFSET >> PGDIR_SHIFT)
#define FIRST_USER_PGD_NR 0
#ifndef __ASSEMBLY__
......@@ -89,35 +114,43 @@ extern void *vmalloc_start;
#define VMALLOC_END (FIXADDR_START)
#endif
#define _PAGE_READ 0x001 /* read access allowed */
#define _PAGE_WRITE 0x002 /* write access allowed */
#define _PAGE_EXEC 0x004 /* execute access allowed */
#define _PAGE_GATEWAY 0x008 /* privilege promotion allowed */
#define _PAGE_GATEWAY_BIT 28 /* _PAGE_GATEWAY & _PAGE_GATEWAY_BIT need */
/* to agree. One could be defined in relation */
/* to the other, but that's kind of ugly. */
/* 0x010 reserved (B bit) */
#define _PAGE_DIRTY 0x020 /* D: dirty */
/* 0x040 reserved (T bit) */
#define _PAGE_NO_CACHE 0x080 /* Software: Uncacheable */
#define _PAGE_NO_CACHE_BIT 24 /* Needs to agree with _PAGE_NO_CACHE above */
#define _PAGE_ACCESSED 0x100 /* R: page cache referenced */
#define _PAGE_PRESENT 0x200 /* Software: pte contains a translation */
#define _PAGE_PRESENT_BIT 22 /* Needs to agree with _PAGE_PRESENT above */
#define _PAGE_USER 0x400 /* Software: User accessible page */
#define _PAGE_USER_BIT 21 /* Needs to agree with _PAGE_USER above */
/* 0x800 still available */
#ifdef __ASSEMBLY__
#define _PGB_(x) (1 << (63 - (x)))
#define __PAGE_O _PGB_(13)
#define __PAGE_U _PGB_(12)
#define __PAGE_T _PGB_(2)
#define __PAGE_D _PGB_(3)
#define __PAGE_B _PGB_(4)
#define __PAGE_P _PGB_(14)
#endif
/* NB: The tlb miss handlers make certain assumptions about the order */
/* of the following bits, so be careful (One example, bits 25-31 */
/* are moved together in one instruction). */
#define _PAGE_READ_BIT 31 /* (0x001) read access allowed */
#define _PAGE_WRITE_BIT 30 /* (0x002) write access allowed */
#define _PAGE_EXEC_BIT 29 /* (0x004) execute access allowed */
#define _PAGE_GATEWAY_BIT 28 /* (0x008) privilege promotion allowed */
#define _PAGE_DMB_BIT 27 /* (0x010) Data Memory Break enable (B bit) */
#define _PAGE_DIRTY_BIT 26 /* (0x020) Page Dirty (D bit) */
#define _PAGE_REFTRAP_BIT 25 /* (0x040) Page Ref. Trap enable (T bit) */
#define _PAGE_NO_CACHE_BIT 24 /* (0x080) Uncached Page (U bit) */
#define _PAGE_ACCESSED_BIT 23 /* (0x100) Software: Page Accessed */
#define _PAGE_PRESENT_BIT 22 /* (0x200) Software: translation valid */
#define _PAGE_FLUSH_BIT 21 /* (0x400) Software: translation valid */
/* for cache flushing only */
#define _PAGE_USER_BIT 20 /* (0x800) Software: User accessible page */
/* N.B. The bits are defined in terms of a 32 bit word above, so the */
/* following macro is ok for both 32 and 64 bit. */
#define xlate_pabit(x) (31 - x)
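/* For instance (editor's note): _PAGE_READ_BIT is PA bit 31, so
 * _PAGE_READ == 1 << xlate_pabit(31) == 1 << 0 == 0x001, and
 * _PAGE_USER_BIT is PA bit 20, so _PAGE_USER == 1 << 11 == 0x800,
 * matching the hex values quoted in the bit table above. */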
#define _PAGE_READ (1 << xlate_pabit(_PAGE_READ_BIT))
#define _PAGE_WRITE (1 << xlate_pabit(_PAGE_WRITE_BIT))
#define _PAGE_RW (_PAGE_READ | _PAGE_WRITE)
#define _PAGE_EXEC (1 << xlate_pabit(_PAGE_EXEC_BIT))
#define _PAGE_GATEWAY (1 << xlate_pabit(_PAGE_GATEWAY_BIT))
#define _PAGE_DMB (1 << xlate_pabit(_PAGE_DMB_BIT))
#define _PAGE_DIRTY (1 << xlate_pabit(_PAGE_DIRTY_BIT))
#define _PAGE_REFTRAP (1 << xlate_pabit(_PAGE_REFTRAP_BIT))
#define _PAGE_NO_CACHE (1 << xlate_pabit(_PAGE_NO_CACHE_BIT))
#define _PAGE_ACCESSED (1 << xlate_pabit(_PAGE_ACCESSED_BIT))
#define _PAGE_PRESENT (1 << xlate_pabit(_PAGE_PRESENT_BIT))
#define _PAGE_FLUSH (1 << xlate_pabit(_PAGE_FLUSH_BIT))
#define _PAGE_USER (1 << xlate_pabit(_PAGE_USER_BIT))
#define _PAGE_TABLE (_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
#define _PAGE_CHG_MASK (PAGE_MASK | _PAGE_ACCESSED | _PAGE_DIRTY)
#define _PAGE_KERNEL (_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_WRITE | _PAGE_DIRTY | _PAGE_ACCESSED)
......@@ -138,6 +171,7 @@ extern void *vmalloc_start;
#define PAGE_KERNEL_RO __pgprot(_PAGE_PRESENT | _PAGE_EXEC | _PAGE_READ | _PAGE_DIRTY | _PAGE_ACCESSED)
#define PAGE_KERNEL_UNC __pgprot(_PAGE_KERNEL | _PAGE_NO_CACHE)
#define PAGE_GATEWAY __pgprot(_PAGE_PRESENT | _PAGE_USER | _PAGE_ACCESSED | _PAGE_GATEWAY| _PAGE_READ)
#define PAGE_FLUSH __pgprot(_PAGE_FLUSH)
/*
......@@ -167,7 +201,7 @@ extern void *vmalloc_start;
#define __S110 PAGE_RWX
#define __S111 PAGE_RWX
extern unsigned long swapper_pg_dir[]; /* declared in init_task.c */
extern pgd_t swapper_pg_dir[]; /* declared in init_task.c */
/* initial page tables for 0-8MB for kernel */
......@@ -178,23 +212,15 @@ extern unsigned long pg0[];
extern unsigned long *empty_zero_page;
/*
* BAD_PAGETABLE is used when we need a bogus page-table, while
* BAD_PAGE is used for a bogus page.
*
* ZERO_PAGE is a global shared page that is always zero: used
* for zero-mapped memory areas etc..
*/
extern pte_t __bad_page(void);
extern pte_t * __bad_pagetable(void);
#define BAD_PAGETABLE __bad_pagetable()
#define BAD_PAGE __bad_page()
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#define pte_none(x) (!pte_val(x))
#define pte_none(x) ((pte_val(x) == 0) || (pte_val(x) & _PAGE_FLUSH))
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_clear(xp) do { pte_val(*(xp)) = 0; } while (0)
#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pmd_none(x) (!pmd_val(x))
#define pmd_bad(x) ((pmd_val(x) & ~PAGE_MASK) != _PAGE_TABLE)
......@@ -255,14 +281,14 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
__pte; \
})
#define mk_pte(page,pgprot) \
({ \
pte_t __pte; \
\
pte_val(__pte) = ((page)-mem_map)*PAGE_SIZE + \
pgprot_val(pgprot); \
__pte; \
})
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot)
{
pte_t pte;
pte_val(pte) = (pfn << PAGE_SHIFT) | pgprot_val(pgprot);
return pte;
}
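/* Editor's sketch of what the two definitions above expand to: for a given
 * struct page, mk_pte() yields a pte whose value is
 * (page_to_pfn(page) << PAGE_SHIFT) | pgprot_val(pgprot), i.e. the
 * physical address of the page OR'd with the protection bits. */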
/* This takes a physical page address that is used by the remapping functions */
#define mk_pte_phys(physpage, pgprot) \
......@@ -271,15 +297,20 @@ extern inline pte_t pte_mkwrite(pte_t pte) { pte_val(pte) |= _PAGE_WRITE; return
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{ pte_val(pte) = (pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot); return pte; }
/*
* Permanent address of a page. Obviously must never be
* called on a highmem page.
*/
#define __page_address(page) ({ if (PageHighMem(page)) BUG(); PAGE_OFFSET + (((page) - mem_map) << PAGE_SHIFT); })
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
#define pte_page(x) (mem_map+pte_pagenr(x))
/* Permanent address of a page. On parisc we don't have highmem. */
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#ifdef CONFIG_DISCONTIGMEM
#define pte_page(x) (phys_to_page(pte_val(x)))
#else
#define pte_page(x) (mem_map+(pte_val(x) >> PAGE_SHIFT))
#endif
#define pmd_page_kernel(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define __pmd_page(pmd) ((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) virt_to_page((void *)__pmd_page(pmd))
#define pgd_index(address) ((address) >> PGDIR_SHIFT)
......@@ -300,31 +331,110 @@ extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
#endif
/* Find an entry in the third-level page table.. */
#define pte_offset(pmd, address) \
((pte_t *) pmd_page(*(pmd)) + (((address)>>PAGE_SHIFT) & (PTRS_PER_PTE-1)))
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
((pte_t *) pmd_page_kernel(*(pmd)) + __pte_offset(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
extern void paging_init (void);
extern inline void update_mmu_cache(struct vm_area_struct * vma,
unsigned long address, pte_t pte)
{
}
/* Used for deferring calls to flush_dcache_page() */
#define PG_dcache_dirty PG_arch_1
struct vm_area_struct; /* forward declaration (include/linux/mm.h) */
extern void update_mmu_cache(struct vm_area_struct *, unsigned long, pte_t);
/* Encode and de-code a swap entry */
#define __swp_type(x) ((x).val & 0x3f)
#define __swp_offset(x) ( (((x).val >> 6) & 0x7) | \
(((x).val >> 7) & ~0x7) )
#define __swp_type(x) ((x).val & 0x1f)
#define __swp_offset(x) ( (((x).val >> 5) & 0xf) | \
(((x).val >> 7) & ~0xf) )
#define __swp_entry(type, offset) ((swp_entry_t) { (type) | \
((offset & 0x7) << 6) | \
((offset & ~0x7) << 7) })
((offset & 0xf) << 5) | \
((offset & ~0xf) << 7) })
#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
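/* Layout sketch for the new swap encoding above (editor's note): val bits
 * [4:0] hold the swap type, bits [8:5] hold offset[3:0], and the rest of
 * the offset starts at bit 11. Bits 9 and 10 are deliberately skipped so
 * that a swap pte can never have _PAGE_PRESENT (0x200) or _PAGE_FLUSH
 * (0x400) set. */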
#define module_map vmalloc
#define module_unmap vfree
static inline int ptep_test_and_clear_young(pte_t *ptep)
{
#ifdef CONFIG_SMP
return test_and_clear_bit(xlate_pabit(_PAGE_ACCESSED_BIT), ptep);
#else
pte_t pte = *ptep;
if (!pte_young(pte))
return 0;
set_pte(ptep, pte_mkold(pte));
return 1;
#endif
}
#include <asm-generic/pgtable.h>
static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
#ifdef CONFIG_SMP
return test_and_clear_bit(xlate_pabit(_PAGE_DIRTY_BIT), ptep);
#else
pte_t pte = *ptep;
if (!pte_dirty(pte))
return 0;
set_pte(ptep, pte_mkclean(pte));
return 1;
#endif
}
#ifdef CONFIG_SMP
extern spinlock_t pa_dbit_lock;
#else
static int pa_dbit_lock; /* dummy to keep the compilers happy */
#endif
static inline pte_t ptep_get_and_clear(pte_t *ptep)
{
pte_t old_pte;
pte_t pte;
spin_lock(&pa_dbit_lock);
pte = old_pte = *ptep;
pte_val(pte) &= ~_PAGE_PRESENT;
pte_val(pte) |= _PAGE_FLUSH;
set_pte(ptep,pte);
spin_unlock(&pa_dbit_lock);
return old_pte;
}
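/* Editor's note: ptep_get_and_clear() above swaps _PAGE_PRESENT for
 * _PAGE_FLUSH instead of zeroing the pte, so the physical address stays
 * recoverable for a later cache flush; this is also why the new pte_none()
 * earlier in this patch treats a _PAGE_FLUSH-only pte as empty. */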
static inline void ptep_set_wrprotect(pte_t *ptep)
{
#ifdef CONFIG_SMP
unsigned long new, old;
do {
old = pte_val(*ptep);
new = pte_val(pte_wrprotect(__pte (old)));
} while (cmpxchg((unsigned long *) ptep, old, new) != old);
#else
pte_t old_pte = *ptep;
set_pte(ptep, pte_wrprotect(old_pte));
#endif
}
static inline void ptep_mkdirty(pte_t *ptep)
{
#ifdef CONFIG_SMP
set_bit(xlate_pabit(_PAGE_DIRTY_BIT), ptep);
#else
pte_t old_pte = *ptep;
set_pte(ptep, pte_mkdirty(old_pte));
#endif
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
typedef pte_t *pte_addr_t;
......@@ -332,9 +442,8 @@ typedef pte_t *pte_addr_t;
#define io_remap_page_range remap_page_range
/*
* No page table caches to initialise
*/
#define pgtable_cache_init() do { } while (0)
/* We provide our own get_unmapped_area to provide cache coherency */
#define HAVE_ARCH_UNMAPPED_AREA
#endif /* _PARISC_PAGE_H */
#endif /* _PARISC_PGTABLE_H */
......@@ -19,10 +19,17 @@ typedef int __kernel_suseconds_t;
typedef int __kernel_clock_t;
typedef int __kernel_daddr_t;
/* Note these change from narrow to wide kernels */
#ifdef __LP64__
typedef unsigned long __kernel_size_t;
typedef long __kernel_ssize_t;
typedef long __kernel_ptrdiff_t;
typedef long __kernel_time_t;
#else
typedef unsigned int __kernel_size_t;
typedef int __kernel_ssize_t;
typedef int __kernel_ptrdiff_t;
typedef int __kernel_time_t;
#endif
typedef char * __kernel_caddr_t;
typedef unsigned short __kernel_uid16_t;
......@@ -44,6 +51,10 @@ typedef struct {
#endif /* !defined(__KERNEL__) && !defined(__USE_ALL) */
} __kernel_fsid_t;
/* compatibility stuff */
typedef __kernel_uid_t __kernel_old_uid_t;
typedef __kernel_gid_t __kernel_old_gid_t;
#if defined(__KERNEL__) && defined(__LP64__)
/* Now 32bit compatibility types */
typedef unsigned int __kernel_dev_t32;
......
......@@ -2,12 +2,14 @@
* include/asm-parisc/processor.h
*
* Copyright (C) 1994 Linus Torvalds
* Copyright (C) 2001 Grant Grundler
*/
#ifndef __ASM_PARISC_PROCESSOR_H
#define __ASM_PARISC_PROCESSOR_H
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/hardware.h>
......@@ -15,8 +17,11 @@
#include <asm/pdc.h>
#include <asm/ptrace.h>
#include <asm/types.h>
#include <asm/system.h>
#endif /* __ASSEMBLY__ */
#define KERNEL_STACK_SIZE (4*PAGE_SIZE)
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
......@@ -30,8 +35,11 @@
#define current_text_addr() ({ void *pc; __asm__("\n\tblr 0,%0\n\tnop":"=r" (pc)); pc; })
#define TASK_SIZE (PAGE_OFFSET)
#define TASK_UNMAPPED_BASE (TASK_SIZE / 3)
#define TASK_SIZE (current->thread.task_size)
#define DEFAULT_TASK_SIZE (0xFFF00000UL)
#define TASK_UNMAPPED_BASE (current->thread.map_base)
#define DEFAULT_MAP_BASE (0x40000000UL)
#ifndef __ASSEMBLY__
......@@ -50,17 +58,14 @@ struct system_cpuinfo_parisc {
struct {
struct pdc_model model;
struct pdc_model_cpuid /* ARGH */ versions;
struct pdc_model_cpuid cpuid;
#if 0
struct pdc_model_caps caps;
#endif
unsigned long versions;
unsigned long cpuid;
unsigned long capabilities;
char sys_model_name[81]; /* PDC-ROM returns this model name */
} pdc;
char *model_name;
char *cpu_name;
char *family_name;
char *cpu_name; /* e.g. "PA7300LC (PCX-L2)" */
char *family_name; /* e.g. "1.1e" */
};
......@@ -68,29 +73,41 @@ struct system_cpuinfo_parisc {
** Per CPU data structure - ie varies per CPU.
*/
struct cpuinfo_parisc {
unsigned cpuid;
struct irq_region *region;
unsigned long it_value; /* Interval Timer value at last timer interrupt */
unsigned long it_delta; /* Interval Timer delta (tic_10ms / HZ * 100) */
unsigned long hpa; /* Host Physical address */
unsigned long txn_addr; /* External Interrupt Register or id_eid */
unsigned long bh_count; /* number of times bh was invoked */
unsigned long irq_count; /* number of IRQ's since boot */
unsigned long irq_max_cr16; /* longest time to handle a single IRQ */
unsigned long it_value; /* Interval Timer value at last timer Intr */
unsigned long it_delta; /* Interval Timer delta (tic_10ms / HZ * 100) */
unsigned long irq_count; /* number of IRQ's since boot */
unsigned long irq_max_cr16; /* longest time to handle a single IRQ */
unsigned long cpuid; /* aka slot_number or set to NO_PROC_ID */
unsigned long hpa; /* Host Physical address */
unsigned long txn_addr; /* MMIO addr of EIR or id_eid */
#ifdef CONFIG_SMP
spinlock_t lock; /* synchronization for ipi's */
unsigned long pending_ipi; /* bitmap of type ipi_message_type */
unsigned long ipi_count; /* number ipi Interrupts */
#endif
unsigned long bh_count; /* number of times bh was invoked */
unsigned long prof_counter; /* per CPU profiling support */
unsigned long prof_multiplier; /* per CPU profiling support */
unsigned long fp_rev;
unsigned long fp_model;
unsigned int state;
struct parisc_device *dev;
};
extern struct system_cpuinfo_parisc boot_cpu_data;
extern struct cpuinfo_parisc cpu_data[NR_CPUS];
#define current_cpu_data cpu_data[smp_processor_id()]
extern void identify_cpu(struct cpuinfo_parisc *);
#define CPU_HVERSION ((boot_cpu_data.hversion >> 4) & 0x0FFF)
#ifdef CONFIG_EISA
extern int EISA_bus;
#else
#define EISA_bus 0
#endif
#define EISA_bus 0 /* we don't have ISA support yet */
#define EISA_bus__is_a_macro /* for versions in ksyms.c */
#define MCA_bus 0
#define MCA_bus__is_a_macro /* for versions in ksyms.c */
......@@ -100,31 +117,32 @@ typedef struct {
struct thread_struct {
struct pt_regs regs;
unsigned long pg_tables;
unsigned long task_size;
unsigned long map_base;
unsigned long flags;
};
/* Thread struct flags. */
#define PARISC_KERNEL_DEATH (1UL << 31) /* see die_if_kernel()... */
#define INIT_THREAD { { \
{ 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0 }, \
{ 0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0, \
0, 0, 0, 0, 0, 0, 0, 0 }, \
{ 0, 0, 0, 0, 0, 0, 0, 0 }, \
{ 0, 0}, { 0, 0}, 0, 0, 0, 0 \
}, __pa((unsigned long) swapper_pg_dir) }
#define INIT_THREAD { \
regs: { gr: { 0, }, \
fr: { 0, }, \
sr: { 0, }, \
iasq: { 0, }, \
iaoq: { 0, }, \
cr27: 0, \
}, \
task_size: DEFAULT_TASK_SIZE, \
map_base: DEFAULT_MAP_BASE, \
flags: 0 \
}
/*
* Return saved PC of a blocked thread. This is used by ps mostly.
*/
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
static inline unsigned long thread_saved_pc(struct task_struct *t)
{
return 0xabcdef;
}
......@@ -144,6 +162,35 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
* We also initialize sr3 to an illegal value (illegal for our
* implementation, not for the architecture).
*/
typedef unsigned int elf_caddr_t;
#define start_thread_som(regs, new_pc, new_sp) do { \
unsigned long *sp = (unsigned long *)new_sp; \
__u32 spaceid = (__u32)current->mm->context; \
unsigned long pc = (unsigned long)new_pc; \
/* offset pc for priv. level */ \
pc |= 3; \
\
set_fs(USER_DS); \
regs->iasq[0] = spaceid; \
regs->iasq[1] = spaceid; \
regs->iaoq[0] = pc; \
regs->iaoq[1] = pc + 4; \
regs->sr[2] = LINUX_GATEWAY_SPACE; \
regs->sr[3] = 0xffff; \
regs->sr[4] = spaceid; \
regs->sr[5] = spaceid; \
regs->sr[6] = spaceid; \
regs->sr[7] = spaceid; \
regs->gr[ 0] = USER_PSW; \
regs->gr[30] = ((new_sp)+63)&~63; \
regs->gr[31] = pc; \
\
get_user(regs->gr[26],&sp[0]); \
get_user(regs->gr[25],&sp[-1]); \
get_user(regs->gr[24],&sp[-2]); \
get_user(regs->gr[23],&sp[-3]); \
} while(0)
/* The ELF abi wants things done a "wee bit" differently than
* som does. Supporting this behavior here avoids
......@@ -163,22 +210,44 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
| 32 bytes of magic |
|---------------------------------|
| 32 bytes argument/sp save area |
|---------------------------------| ((current->mm->env_end) + 63 & ~63)
| N bytes of slack                |
|---------------------------------|
| envvar and arg strings          |
|---------------------------------|
| ELF auxiliary info              |
| (up to 28 words)                |
|---------------------------------|
| Environment variable pointers   |
| upwards to NULL                 |
|---------------------------------|
| Argument pointers               |
| upwards to NULL                 |
|---------------------------------| <- argv
| argc (1 word)                   |
-----------------------------------

|---------------------------------|
| 32 bytes of magic               |
|---------------------------------|
| 32 bytes argument/sp save area  |
|---------------------------------| (bprm->p)
| ELF auxiliary info              |
| (up to 28 words)                |
|---------------------------------|
| NULL                            |
|---------------------------------|
| Environment pointers            |
|---------------------------------|
| NULL                            |
|---------------------------------|
| Argument pointers               |
|---------------------------------| <- argv
| argc (1 word)                   |
|---------------------------------| <- bprm->exec (HACK!)
| N bytes of slack                |
|---------------------------------|
| filename passed to execve       |
|---------------------------------| (mm->env_end)
| env strings                     |
|---------------------------------| (mm->env_start, mm->arg_end)
| arg strings                     |
|---------------------------------|
| additional faked arg strings if |
| we're invoked via binfmt_script |
|---------------------------------| (mm->arg_start)
stack base is at TASK_SIZE - rlim_max.
on downward growing arches, it looks like this:
stack base at TASK_SIZE
| filename passed to execve
| env strings
| arg strings
| faked arg strings
| slack
| ELF
| envps
| argvs
| argc
* The pleasant part of this is that if we need to skip arguments we
* can just decrement argc and move argv, because the stack pointer
......@@ -186,154 +255,78 @@ extern inline unsigned long thread_saved_pc(struct thread_struct *t)
* argument vectors.
*
* Note that the S/390 people took the easy way out and hacked their
* GCC to make the stack grow downwards. */
#define start_thread_som(regs, new_pc, new_sp) do { \
unsigned long *sp = (unsigned long *)new_sp; \
__u32 spaceid = (__u32)current->mm->context; \
unsigned long pc = (unsigned long)new_pc; \
/* offset pc for priv. level */ \
pc |= 3; \
\
set_fs(USER_DS); \
regs->iasq[0] = spaceid; \
regs->iasq[1] = spaceid; \
regs->iaoq[0] = pc; \
regs->iaoq[1] = pc; \
regs->sr[2] = LINUX_GATEWAY_SPACE; \
regs->sr[3] = 0xffff; \
regs->sr[4] = spaceid; \
regs->sr[5] = spaceid; \
regs->sr[6] = spaceid; \
regs->sr[7] = spaceid; \
regs->gr[ 0] = USER_INIT_PSW; \
regs->gr[30] = ((new_sp)+63)&~63; \
regs->gr[31] = pc; \
\
get_user(regs->gr[26],&sp[0]); \
get_user(regs->gr[25],&sp[-1]); \
get_user(regs->gr[24],&sp[-2]); \
get_user(regs->gr[23],&sp[-3]); \
\
regs->cr30 = (u32) current; \
} while(0)
#define start_thread(regs, new_pc, new_sp) do { \
unsigned long *sp = (unsigned long *)new_sp; \
__u32 spaceid = (__u32)current->mm->context; \
unsigned long pc = (unsigned long)new_pc; \
/* offset pc for priv. level */ \
pc |= 3; \
\
\
set_fs(USER_DS); \
regs->iasq[0] = spaceid; \
regs->iasq[1] = spaceid; \
regs->iaoq[0] = pc; \
regs->iaoq[1] = pc; \
regs->sr[2] = LINUX_GATEWAY_SPACE; \
regs->sr[3] = 0xffff; \
regs->sr[4] = spaceid; \
regs->sr[5] = spaceid; \
regs->sr[6] = spaceid; \
regs->sr[7] = spaceid; \
regs->gr[ 0] = USER_INIT_PSW; \
regs->fr[ 0] = 0LL; \
regs->fr[ 1] = 0LL; \
regs->fr[ 2] = 0LL; \
regs->fr[ 3] = 0LL; \
regs->gr[30] = ((current->mm->env_end)+63)&~63; \
regs->gr[31] = pc; \
\
get_user(regs->gr[25],&sp[0]); \
regs->gr[24] = (unsigned long) &sp[1]; \
regs->gr[23] = 0; \
\
regs->cr30 = (u32) current; \
} while(0)
#ifdef __LP64__
/*
* For 64 bit kernels we need a version of start thread for 32 bit
* elf files.
*
* FIXME: It should be possible to not duplicate the above code
* by playing games with concatenation to form both
* macros at compile time. The only difference between
* this macro and the above is the name and the types
* for sp and pc.
* GCC to make the stack grow downwards.
*/
#define start_thread32(regs, new_pc, new_sp) do { \
__u32 *sp = (__u32 *)new_sp; \
#define start_thread(regs, new_pc, new_sp) do { \
elf_addr_t *sp = (elf_addr_t *)new_sp; \
__u32 spaceid = (__u32)current->mm->context; \
__u32 pc = (__u32)new_pc; \
/* offset pc for priv. level */ \
pc |= 3; \
elf_addr_t pc = (elf_addr_t)new_pc | 3; \
elf_caddr_t *argv = (elf_caddr_t *)bprm->exec + 1; \
\
set_fs(USER_DS); \
regs->iasq[0] = spaceid; \
regs->iasq[1] = spaceid; \
regs->iaoq[0] = pc; \
regs->iaoq[1] = pc; \
regs->iaoq[1] = pc + 4; \
regs->sr[2] = LINUX_GATEWAY_SPACE; \
regs->sr[3] = 0xffff; \
regs->sr[4] = spaceid; \
regs->sr[5] = spaceid; \
regs->sr[6] = spaceid; \
regs->sr[7] = spaceid; \
regs->gr[ 0] = USER_INIT_PSW; \
regs->gr[ 0] = USER_PSW; \
regs->fr[ 0] = 0LL; \
regs->fr[ 1] = 0LL; \
regs->fr[ 2] = 0LL; \
regs->fr[ 3] = 0LL; \
regs->gr[30] = ((current->mm->env_end)+63)&~63; \
regs->gr[30] = ((unsigned long)sp + 63) &~ 63; \
regs->gr[31] = pc; \
\
get_user(regs->gr[25],&sp[0]); \
regs->gr[24] = (unsigned long) &sp[1]; \
regs->gr[23] = 0; \
\
regs->cr30 = (u32) current; \
get_user(regs->gr[25], (argv - 1)); \
regs->gr[24] = (long) argv; \
regs->gr[23] = 0; \
} while(0)
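/* Editor's note on the iaoq[1] = pc + 4 change in both start_thread macros
 * of this patch: the PA instruction-address queue holds the addresses of
 * the current and the next instruction, so the back of the queue should
 * point one word (4 bytes) past the front rather than at the same
 * address. */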
#endif
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
extern void release_thread(struct task_struct *);
extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define copy_segments(tsk, mm) do { } while (0)
extern void map_hpux_gateway_page(struct task_struct *tsk, struct mm_struct *mm);
#define copy_segments(tsk, mm) do { \
if (tsk->personality == PER_HPUX) \
map_hpux_gateway_page(tsk,mm); \
} while (0)
#define release_segments(mm) do { } while (0)
extern inline unsigned long get_wchan(struct task_struct *p)
static inline unsigned long get_wchan(struct task_struct *p)
{
return 0xdeadbeef; /* XXX */
}
#define KSTK_EIP(tsk) (0xdeadbeef)
#define KSTK_ESP(tsk) (0xdeadbeef)
/* Be sure to hunt all references to this down when you change the size of
* the kernel stack */
#define KSTK_EIP(tsk) ((tsk)->thread.regs.iaoq[0])
#define KSTK_ESP(tsk) ((tsk)->thread.regs.gr[30])
#endif /* __ASSEMBLY__ */
#define THREAD_SIZE (4*PAGE_SIZE)
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,2))
#define free_task_struct(p) free_pages((unsigned long)(p),2)
#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)

#ifdef CONFIG_PA20
#define ARCH_HAS_PREFETCH
extern inline void prefetch(const void *addr)
{
	__asm__("ldw 0(%0), %%r0" : : "r" (addr));
}

#define ARCH_HAS_PREFETCHW
extern inline void prefetchw(const void *addr)
{
	__asm__("ldd 0(%0), %%r0" : : "r" (addr));
}
#endif
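/* Editor's note: a load whose target is %r0 architecturally discards its
 * result, so the ldw/ldd above are effectively "touch this cache line"
 * hints; PA 2.0 defines such loads as non-trapping prefetches, which is
 * presumably why these macros are gated on CONFIG_PA20. */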
#define cpu_relax() barrier()
#endif /* __ASM_PARISC_PROCESSOR_H */
......@@ -9,6 +9,8 @@
#define PSW_G 0x00000040 /* PA1.x only */
#define PSW_O 0x00000080 /* PA2.0 only */
#define PSW_CB 0x0000ff00
#define PSW_M 0x00010000
#define PSW_V 0x00020000
#define PSW_C 0x00040000
......@@ -23,10 +25,15 @@
#define PSW_S 0x02000000
#define PSW_E 0x04000000
#define PSW_W 0x08000000 /* PA2.0 only */
#define PSW_W_BIT 36 /* PA2.0 only */
#define PSW_Z 0x40000000 /* PA1.x only */
#define PSW_Y 0x80000000 /* PA1.x only */
#ifdef __LP64__
#define PSW_HI_CB 0x000000ff /* PA2.0 only */
#endif
/* PSW bits to be used with ssm/rsm */
#define PSW_SM_I 0x1
#define PSW_SM_D 0x2
......@@ -40,15 +47,16 @@
#define PSW_SM_W 0x200
#ifdef __LP64__
# define USER_PSW (PSW_C | PSW_D | PSW_Q | PSW_I)
# define USER_INIT_PSW (PSW_C | PSW_D | PSW_Q | PSW_I | PSW_N)
# define KERNEL_PSW (PSW_C | PSW_D | PSW_Q | PSW_W)
# define PDC_PSW (PSW_Q | PSW_W)
# define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
# define KERNEL_PSW (PSW_W | PSW_C | PSW_Q | PSW_P | PSW_D)
# define REAL_MODE_PSW (PSW_W | PSW_Q)
# define USER_PSW_MASK (PSW_W | PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
# define USER_PSW_HI_MASK (PSW_HI_CB)
#else
# define USER_PSW (PSW_C | PSW_D | PSW_Q | PSW_I | PSW_P)
# define USER_INIT_PSW (PSW_C | PSW_D | PSW_Q | PSW_I | PSW_N)
# define KERNEL_PSW (PSW_C | PSW_D | PSW_Q)
# define PDC_PSW (PSW_Q)
# define USER_PSW (PSW_C | PSW_Q | PSW_P | PSW_D | PSW_I)
# define KERNEL_PSW (PSW_C | PSW_Q | PSW_P | PSW_D)
# define REAL_MODE_PSW (PSW_Q)
# define USER_PSW_MASK (PSW_T | PSW_N | PSW_X | PSW_B | PSW_V | PSW_CB)
#endif
#endif
......@@ -8,7 +8,12 @@
#include <linux/types.h>
/* This struct defines the way the registers are stored on the
stack during a system call. */
* stack during a system call.
*
* N.B. gdb/strace care about the size and offsets within this
* structure. If you change things, you may break object compatibility
* for those applications.
*/
struct pt_regs {
unsigned long gr[32]; /* PSW is in gr[0] */
......@@ -16,11 +21,8 @@ struct pt_regs {
unsigned long sr[ 8];
unsigned long iasq[2];
unsigned long iaoq[2];
unsigned long cr24;
unsigned long cr25;
unsigned long cr26;
unsigned long cr27;
unsigned long cr30;
unsigned long pad0; /* available for other uses */
unsigned long orig_r28;
unsigned long ksp;
unsigned long kpc;
......@@ -29,7 +31,6 @@ struct pt_regs {
unsigned long isr; /* CR20 */
unsigned long ior; /* CR21 */
unsigned long ipsw; /* CR22 */
unsigned long cr_pid[4]; /* CR8,9,12,13 */
};
#define task_regs(task) ((struct pt_regs *) ((char *)(task) + TASK_REGS))
......
#ifndef _ASM_PARISC_RT_SIGFRAME_H
#define _ASM_PARISC_RT_SIGFRAME_H
struct rt_sigframe {
unsigned int tramp[4];
struct siginfo info;
struct ucontext uc;
};
/*
* The 32-bit ABI wants at least 48 bytes for a function call frame:
* 16 bytes for arg0-arg3, and 32 bytes for magic (the only part of
* which Linux/parisc uses is sp-20 for the saved return pointer...)
* Then, the stack pointer must be rounded to a cache line (64 bytes).
*/
#define PARISC_RT_SIGFRAME_SIZE \
(((sizeof(struct rt_sigframe) + 48) + 63) & -64)
#endif
......@@ -5,5 +5,8 @@
/* declared in arch/parisc/kernel/setup.c */
extern struct proc_dir_entry * proc_runway_root;
#define RUNWAY_STATUS 0x10
#define RUNWAY_DEBUG 0x40
#endif /* __KERNEL__ */
#endif /* ASM_PARISC_RUNWAY_H */
#ifndef _ASM_PARISC_SCATTERLIST_H
#define _ASM_PARISC_SCATTERLIST_H
#include <asm/page.h>
struct scatterlist {
struct page *page;
unsigned int offset;
unsigned int length;
/* an IOVA can be 64 bits on some PA-RISC platforms. */
......@@ -11,6 +14,7 @@ struct scatterlist {
__u32 iova_length; /* bytes mapped */
};
#define sg_virt_addr(sg) ((unsigned long)(page_address(sg->page) + sg->offset))
#define sg_dma_address(sg) ((sg)->iova)
#define sg_dma_len(sg) ((sg)->iova_length)
......
#ifndef _ASM_PARISC_SEMAPHORE_H
#define _ASM_PARISC_SEMAPHORE_H
#include <linux/linkage.h>
/*
* SMP- and interrupt-safe semaphores.
*
* (C) Copyright 1996 Linus Torvalds
*
* SuperH version by Niibe Yutaka
* PA-RISC version by Matthew Wilcox
*
*/
#include <linux/spinlock.h>
#include <linux/wait.h>
#include <linux/rwsem.h>
#include <asm/system.h>
#include <asm/atomic.h>
/*
* The `count' is initialised to the number of people who are allowed to
* take the lock. (Normally we want a mutex, so this is `1'). If
* `count' is positive, the lock can be taken. If it's 0, no-one is
* waiting on it. If it's -1, at least one task is waiting.
*/
struct semaphore {
atomic_t count;
int waking;
spinlock_t sentry;
int count;
wait_queue_head_t wait;
#if WAITQUEUE_DEBUG
long __magic;
......@@ -35,7 +39,7 @@ struct semaphore {
#endif
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
{ SPIN_LOCK_UNLOCKED, count, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
__SEM_DEBUG_INIT(name) }
#define __MUTEX_INITIALIZER(name) \
......@@ -49,18 +53,7 @@ struct semaphore {
extern inline void sema_init (struct semaphore *sem, int val)
{
/*
* *sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
*
* i'd rather use the more flexible initialization above, but sadly
* GCC 2.7.2.3 emits a bogus warning. EGCS doesn't. Oh well.
*/
atomic_set(&sem->count, val);
sem->waking = 0;
init_waitqueue_head(&sem->wait);
#if WAITQUEUE_DEBUG
sem->__magic = (long)&sem->__magic;
#endif
*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
}
static inline void init_MUTEX (struct semaphore *sem)
......@@ -73,17 +66,18 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem)
sema_init(sem, 0);
}
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
static inline int sem_getcount(struct semaphore *sem)
{
return sem->count;
}
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
asmlinkage void __up(struct semaphore * sem);
extern spinlock_t semaphore_wake_lock;
/* Semaphores can be `tried' from irq context. So we have to disable
* interrupts while we're messing with the semaphore. Sorry.
*/
extern __inline__ void down(struct semaphore * sem)
{
......@@ -91,8 +85,13 @@ extern __inline__ void down(struct semaphore * sem)
CHECK_MAGIC(sem->__magic);
#endif
if (atomic_dec_return(&sem->count) < 0)
spin_lock_irq(&sem->sentry);
if (sem->count > 0) {
sem->count--;
} else {
__down(sem);
}
spin_unlock_irq(&sem->sentry);
}
extern __inline__ int down_interruptible(struct semaphore * sem)
......@@ -102,21 +101,33 @@ extern __inline__ int down_interruptible(struct semaphore * sem)
CHECK_MAGIC(sem->__magic);
#endif
if (atomic_dec_return(&sem->count) < 0)
spin_lock_irq(&sem->sentry);
if (sem->count > 0) {
sem->count--;
} else {
ret = __down_interruptible(sem);
}
spin_unlock_irq(&sem->sentry);
return ret;
}
/*
* down_trylock returns 0 on success, 1 if we failed to get the lock.
* May not sleep, but must preserve irq state
*/
extern __inline__ int down_trylock(struct semaphore * sem)
{
int ret = 0;
unsigned long flags;
int count;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
if (atomic_dec_return(&sem->count) < 0)
ret = __down_trylock(sem);
return ret;
spin_lock_irqsave(&sem->sentry, flags);
count = sem->count - 1;
if (count >= 0)
sem->count = count;
spin_unlock_irqrestore(&sem->sentry, flags);
return (count < 0);
}
/*
......@@ -125,11 +136,17 @@ extern __inline__ int down_trylock(struct semaphore * sem)
*/
extern __inline__ void up(struct semaphore * sem)
{
unsigned long flags;
#if WAITQUEUE_DEBUG
CHECK_MAGIC(sem->__magic);
#endif
if (atomic_inc_return(&sem->count) <= 0)
spin_lock_irqsave(&sem->sentry, flags);
if (sem->count < 0) {
__up(sem);
} else {
sem->count++;
}
spin_unlock_irqrestore(&sem->sentry, flags);
}
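/* Editor's note on the locking asymmetry above: down() and
 * down_interruptible() may sleep, so they can never be called from
 * interrupt context and plain spin_lock_irq() suffices; up() and
 * down_trylock() can legitimately be called from irq handlers (see the
 * comment above down()), hence the irqsave/irqrestore variants there. */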
#endif /* _ASM_PARISC_SEMAPHORE_H */
......@@ -13,9 +13,13 @@
struct semid64_ds {
struct ipc64_perm sem_perm; /* permissions .. see ipc.h */
#ifndef __LP64__
unsigned int __pad1;
#endif
__kernel_time_t sem_otime; /* last semop time */
#ifndef __LP64__
unsigned int __pad2;
#endif
__kernel_time_t sem_ctime; /* last change time */
unsigned int sem_nsems; /* no. of semaphores in array */
unsigned int __unused1;
......
......@@ -3,7 +3,6 @@
*/
#include <linux/config.h>
#include <asm/gsc.h>
/*
* This assumes you have a 7.272727 MHz clock for your UART.
......@@ -27,20 +26,27 @@
#define ACCENT_FLAGS 0
#define BOCA_FLAGS 0
#define HUB6_FLAGS 0
#define RS_TABLE_SIZE 64
#else
#define RS_TABLE_SIZE 4
#endif
/*
* The base is relative to the LASI base. We can fix that
* up later. We could also virtually map LASI so that we get
* nice constants all over our kernel...
* We don't use the ISA probing code, so these entries are just to reserve
* space. Some example (maximal) configurations:
* - 712 w/ additional Lasi & RJ16 ports: 4
* - J5k w/ PCI serial cards: 2 + 4 * card ~= 34
* - A500 w/ PCI serial cards: 5 + 4 * card ~= 17
*/
#define STD_SERIAL_PORT_DEFNS \
/* UART CLK PORT IRQ FLAGS */ \
{ 0, LASI_BASE_BAUD, -1, 4, ASYNC_SKIP_TEST, 0, PORT_UNKNOWN,}, /* ttyS0 */
{ 0, }, /* ttyS0 */ \
{ 0, }, /* ttyS1 */ \
{ 0, }, /* ttyS2 */ \
{ 0, }, /* ttyS3 */ \
{ 0, }, /* ttyS4 */ \
{ 0, }, /* ttyS5 */ \
{ 0, }, /* ttyS6 */ \
{ 0, }, /* ttyS7 */ \
{ 0, }, /* ttyS8 */
#define SERIAL_PORT_DFNS \
STD_SERIAL_PORT_DEFNS
......
......@@ -13,12 +13,21 @@
struct shmid64_ds {
struct ipc64_perm shm_perm; /* operation perms */
#ifndef __LP64__
unsigned int __pad1;
#endif
__kernel_time_t shm_atime; /* last attach time */
#ifndef __LP64__
unsigned int __pad2;
#endif
__kernel_time_t shm_dtime; /* last detach time */
#ifndef __LP64__
unsigned int __pad3;
#endif
__kernel_time_t shm_ctime; /* last change time */
#ifndef __LP64__
unsigned int __pad4;
#endif
size_t shm_segsz; /* size of segment (bytes) */
__kernel_pid_t shm_cpid; /* pid of creator */
__kernel_pid_t shm_lpid; /* pid of last operator */
......@@ -28,11 +37,10 @@ struct shmid64_ds {
};
#ifdef __LP64__
#warning shminfo64 is an undocumented struct
/* The 'unsigned int' (formerly 'unsigned long') data types below will
* ensure that a 32-bit app calling shmctl(*,IPC_INFO,*) will work on
* a wide kernel, but if some of these values are meant to contain pointers
* they may need to be 'long long' instead. -PB
* they may need to be 'long long' instead. -PB XXX FIXME
*/
#endif
struct shminfo64 {
......
#ifndef _ASMPARISC_SHMPARAM_H
#define _ASMPARISC_SHMPARAM_H
#define SHMLBA PAGE_SIZE /* attach addr a multiple of this */
#define SHMLBA 0x00400000 /* attach addr needs to be 4 MB aligned */
#endif /* _ASMPARISC_SHMPARAM_H */
......@@ -3,8 +3,72 @@
#include <linux/config.h>
#ifdef CONFIG_SMP
extern volatile unsigned long cpu_online_map; /* Bitmap of available cpu's */
#endif
#if defined(CONFIG_SMP)
/* Page Zero location where PDC will look for the address to branch to when
** we poke slave CPUs that are still in the "Icache loop".
*/
#define PDC_OS_BOOT_RENDEZVOUS 0x10
#define PDC_OS_BOOT_RENDEZVOUS_HI 0x28
#ifndef ASSEMBLY
#include <linux/threads.h> /* for NR_CPUS */
typedef unsigned long address_t;
extern volatile unsigned long cpu_online_map;
/*
* Private routines/data
*
* physical and logical are equivalent until we support CPU hotplug.
*/
#define cpu_number_map(cpu) (cpu)
#define cpu_logical_map(cpu) (cpu)
extern void smp_send_reschedule(int cpu);
#endif /* !ASSEMBLY */
/*
* This magic constant controls our willingness to transfer
* a process across CPUs. Such a transfer incurs cache and tlb
* misses. The current value is inherited from i386. Still needs
* to be tuned for parisc.
*/
#define PROC_CHANGE_PENALTY 15 /* Schedule penalty */
#undef ENTRY_SYS_CPUS
#ifdef ENTRY_SYS_CPUS
#define STATE_RENDEZVOUS 0
#define STATE_STOPPED 1
#define STATE_RUNNING 2
#define STATE_HALTED 3
#endif
extern unsigned long cpu_present_mask;
#define smp_processor_id() (current_thread_info()->cpu)
#define cpu_online(cpu) (cpu_online_map & (1<<(cpu)))
#define cpu_possible(cpu) (cpu_present_mask & (1<<(cpu)))
extern inline unsigned int num_online_cpus(void)
{
return hweight32(cpu_online_map);
}
extern inline int any_online_cpu(unsigned int mask)
{
if (mask & cpu_online_map)
return __ffs(mask & cpu_online_map);
return -1;
}
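/* Editor's example for the two helpers above: with cpu_online_map == 0x5
 * (CPUs 0 and 2 online), num_online_cpus() == hweight32(0x5) == 2, and
 * any_online_cpu(0x6) == __ffs(0x6 & 0x5) == __ffs(0x4) == 2. */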
#endif /* CONFIG_SMP */
#define NO_PROC_ID 0xFF /* No processor magic marker */
#define ANY_PROC_ID 0xFF /* Any processor magic marker */
#endif /* __ASM_SMP_H */
......@@ -4,12 +4,17 @@
#include <asm/atomic.h>
#include <asm/hardirq.h>
#define cpu_bh_disable(cpu) do { local_bh_count(cpu)++; barrier(); } while (0)
#define cpu_bh_enable(cpu) do { barrier(); local_bh_count(cpu)--; } while (0)
#define local_bh_disable() \
do { preempt_count() += SOFTIRQ_OFFSET; barrier(); } while (0)
#define __local_bh_enable() \
do { barrier(); preempt_count() -= SOFTIRQ_OFFSET; } while (0)
#define local_bh_disable() cpu_bh_disable(smp_processor_id())
#define local_bh_enable() cpu_bh_enable(smp_processor_id())
#define in_softirq() (local_bh_count(smp_processor_id()) != 0)
#define local_bh_enable() \
do { \
__local_bh_enable(); \
if (unlikely(!in_interrupt() && softirq_pending(smp_processor_id()))) \
do_softirq(); \
preempt_check_resched(); \
} while (0)
#endif /* __ASM_SOFTIRQ_H */
......@@ -3,23 +3,35 @@
#include <asm/system.h>
/* we seem to be the only architecture that uses 0 to mean locked - but we
* have to. prumpf */
/* Note that PA-RISC has to use `1' to mean unlocked and `0' to mean locked
* since it only has load-and-zero.
*/
#undef SPIN_LOCK_UNLOCKED
#define SPIN_LOCK_UNLOCKED (spinlock_t) { 1 }
#define spin_lock_init(x) do { (x)->lock = 1; } while(0)
#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock == 1)
#define spin_is_locked(x) ((x)->lock == 0)
#define spin_unlock_wait(x) do { barrier(); } while(((volatile spinlock_t *)(x))->lock == 0)
#define spin_lock(x) \
#if 1
#define _raw_spin_lock(x) do { \
while (__ldcw (&(x)->lock) == 0) \
while (((x)->lock) == 0) ; } while (0)
#else
#define _raw_spin_lock(x) \
do { while(__ldcw(&(x)->lock) == 0); } while(0)
#endif
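/* Editor's note on the "#if 1" choice above: that variant spins on a plain
 * read of the lock word and only retries ldcw once the lock looks free.
 * ldcw is a write to the cache line, so spinning on ldcw itself (the
 * "#else" variant) would presumably bounce the line between CPUs on every
 * iteration. */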
#define spin_unlock(x) \
#define _raw_spin_unlock(x) \
do { (x)->lock = 1; } while(0)
#define spin_trylock(x) (__ldcw(&(x)->lock) == 1)
#define _raw_spin_trylock(x) (__ldcw(&(x)->lock) != 0)
/*
* Read-write spinlocks, allowing multiple readers
......@@ -30,29 +42,37 @@ typedef struct {
volatile int counter;
} rwlock_t;
#define RW_LOCK_UNLOCKED (rwlock_t) { SPIN_LOCK_UNLOCKED, 0 }
#define RW_LOCK_UNLOCKED (rwlock_t) { {1}, 0 }
#define rwlock_init(lp) do { *(lp) = RW_LOCK_UNLOCKED; } while (0)
#define rwlock_is_locked(lp) ((lp)->counter != 0)
/* read_lock, read_unlock are pretty straightforward. Of course it somehow
* sucks we end up saving/restoring flags twice for read_lock_irqsave aso. */
static inline void read_lock(rwlock_t *rw)
static __inline__ void _raw_read_lock(rwlock_t *rw)
{
unsigned long flags;
spin_lock_irqsave(&rw->lock, flags);
local_irq_save(flags);
_raw_spin_lock(&rw->lock);
rw->counter++;
spin_unlock_irqrestore(&rw->lock, flags);
_raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
static inline void read_unlock(rwlock_t *rw)
static __inline__ void _raw_read_unlock(rwlock_t *rw)
{
unsigned long flags;
spin_lock_irqsave(&rw->lock, flags);
local_irq_save(flags);
_raw_spin_lock(&rw->lock);
rw->counter--;
spin_unlock_irqrestore(&rw->lock, flags);
_raw_spin_unlock(&rw->lock);
local_irq_restore(flags);
}
/* write_lock is less trivial. We optimistically grab the lock and check
......@@ -64,14 +84,14 @@ static inline void read_unlock(rwlock_t *rw)
* writers) in interrupt handlers someone fucked up and we'd dead-lock
* sooner or later anyway. prumpf */
static inline void write_lock(rwlock_t *rw)
static __inline__ void _raw_write_lock(rwlock_t *rw)
{
retry:
spin_lock(&rw->lock);
_raw_spin_lock(&rw->lock);
if(rw->counter != 0) {
/* this basically never happens */
spin_unlock(&rw->lock);
_raw_spin_unlock(&rw->lock);
while(rw->counter != 0);
......@@ -79,13 +99,25 @@ static inline void write_lock(rwlock_t *rw)
}
/* got it. now leave without unlocking */
rw->counter = -1; /* remember we are locked */
}
/* write_unlock is absolutely trivial - we don't have to wait for anything */
static inline void write_unlock(rwlock_t *rw)
static __inline__ void _raw_write_unlock(rwlock_t *rw)
{
	rw->counter = 0;
	spin_unlock(&rw->lock);
	_raw_spin_unlock(&rw->lock);
}

static __inline__ int is_read_locked(rwlock_t *rw)
{
	return rw->counter > 0;
}

static __inline__ int is_write_locked(rwlock_t *rw)
{
	return rw->counter < 0;
}
#endif /* __ASM_SPINLOCK_H */
......@@ -66,6 +66,33 @@ struct hpux_stat64 {
gid_t st_gid;
unsigned int st_spare4[3];
};
#define stat64 hpux_stat64
/* This is the struct that 32-bit userspace applications are expecting.
* How 64-bit apps are going to be compiled, I have no idea. But at least
* this way, we don't have a wrapper in the kernel.
*/
struct stat64 {
unsigned long long st_dev;
unsigned int __pad1;
unsigned int __st_ino; /* Not actually filled in */
unsigned int st_mode;
unsigned int st_nlink;
unsigned int st_uid;
unsigned int st_gid;
unsigned long long st_rdev;
unsigned int __pad2;
signed long long st_size;
signed int st_blksize;
signed long long st_blocks;
signed int st_atime;
unsigned int __unused1;
signed int st_mtime;
unsigned int __unused2;
signed int st_ctime;
unsigned int __unused3;
unsigned long long st_ino;
};
#endif
/* This is left blank until we do parisc optimizations */
#define __HAVE_ARCH_MEMSET
extern void * memset(void *, int, size_t);
#ifndef _PARISC_SUPERIO_H
#define _PARISC_SUPERIO_H
/* Offsets to configuration and base address registers */
#define IC_PIC1 0x20 /* PCI I/O address of master 8259 */
#define IC_PIC2 0xA0 /* PCI I/O address of slave */
#define SIO_CR 0x5A /* Configuration Register */
#define SIO_ACPIBAR 0x88 /* ACPI BAR */
#define SIO_FDCBAR 0x90 /* Floppy Disk Controller BAR */
#define SIO_SP1BAR 0x94 /* Serial 1 BAR */
#define SIO_SP2BAR 0x98 /* Serial 2 BAR */
#define SIO_PPBAR 0x9C /* Parallel BAR */
/* Interrupt triggers and routing */
#define TRIGGER_1 0x67 /* Edge/level trigger register 1 */
#define TRIGGER_2 0x68 /* Edge/level trigger register 2 */
#define IR_SER 0x69 /* Serial 1 [0:3] and Serial 2 [4:7] */
#define IR_PFD 0x6a /* Parallel [0:3] and Floppy [4:7] */
#define IR_IDE 0x6b /* IDE1 [0:3] and IDE2 [4:7] */
#define IR_USB 0x6d /* USB [4:7] */
#define IR_LOW 0x69 /* Lowest interrupt routing reg */
#define IR_HIGH 0x71 /* Highest interrupt routing reg */
/* 8259 operational control words */
#define OCW2_EOI 0x20 /* Non-specific EOI */
#define OCW2_SEOI 0x60 /* Specific EOI */
#define OCW3_IIR 0x0A /* Read request register */
#define OCW3_ISR 0x0B /* Read service register */
#define OCW3_POLL 0x0C /* Poll the PIC for an interrupt vector */
/* Interrupt lines. Only PIC1 is used */
#define USB_IRQ 1 /* USB */
#define SP1_IRQ 3 /* Serial port 1 */
#define SP2_IRQ 4 /* Serial port 2 */
#define PAR_IRQ 5 /* Parallel port */
#define FDC_IRQ 6 /* Floppy controller */
#define IDE_IRQ 7 /* IDE (pri+sec) */
/* ACPI registers */
#define USB_REG_CR 0x1f /* USB Regulator Control Register */
#define SUPERIO_NIRQS 8
struct superio_device {
u16 fdc_base;
u16 sp1_base;
u16 sp2_base;
u16 pp_base;
u16 acpi_base;
int iosapic_irq;
int iosapic_irq_enabled;
struct irq_region *irq_region;
struct pci_dev *lio_pdev; /* pci device for legacy IO fn */
};
/*
* Does NS make a 87415 based plug in PCI card? If so, because of this
* macro we currently don't support it being plugged into a machine
* that contains a SuperIO chip AND has CONFIG_SUPERIO enabled.
*
* This could be fixed by checking to see if function 1 exists, and
* if it is SuperIO Legacy IO; but really now, is this combination
* going to EVER happen?
*/
#define SUPERIO_IDE_FN 0 /* Function number of IDE controller */
#define SUPERIO_LIO_FN 1 /* Function number of Legacy IO controller */
#define SUPERIO_USB_FN 2 /* Function number of USB controller */
#define is_superio_device(x) \
(((x)->vendor == PCI_VENDOR_ID_NS) && \
( ((x)->device == PCI_DEVICE_ID_NS_87415) \
|| ((x)->device == PCI_DEVICE_ID_NS_87560_LIO) \
|| ((x)->device == PCI_DEVICE_ID_NS_87560_USB) ) )
extern void superio_inform_irq(int irq);
extern void superio_serial_init(void); /* called by rs_init() */
extern int superio_fixup_irq(struct pci_dev *pcidev); /* called by iosapic */
extern int superio_get_ide_irq(void);
#endif /* _PARISC_SUPERIO_H */
......@@ -35,37 +35,24 @@ struct pa_psw {
unsigned int i:1;
};
#ifdef __LP64__
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW + 4))
#else
#define pa_psw(task) ((struct pa_psw *) ((char *) (task) + TASK_PT_PSW))
#endif
struct task_struct;
extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *);
#define prepare_to_switch() do { } while(0)
#define switch_to(prev, next, last) do { \
(last) = _switch_to(prev, next); \
} while(0)
/* borrowed this from sparc64 -- probably the SMP case is hosed for us */
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() wmb()
#define smp_read_barrier_depends() do { } while(0)
#else
/* This is simply the barrier() macro from linux/kernel.h, but when serial.c
 * uses tqueue.h, which uses smp_mb() defined using barrier(), linux/kernel.h
 * hasn't been included yet, so it fails; thus the macro is repeated here.
*/
#define smp_mb() __asm__ __volatile__("":::"memory");
#define smp_rmb() __asm__ __volatile__("":::"memory");
#define smp_wmb() __asm__ __volatile__("":::"memory");
#define smp_read_barrier_depends() do { } while(0)
#endif
/* interrupt control */
#define local_save_flags(x) __asm__ __volatile__("ssm 0, %0" : "=r" (x) : : "memory")
#define local_irq_restore(x) __asm__ __volatile__("mtsm %0" : : "r" (x) : "memory")
#define local_irq_disable() __asm__ __volatile__("rsm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
#define local_irq_enable() __asm__ __volatile__("ssm %0,%%r0\n" : : "i" (PSW_I) : "memory" )
......@@ -74,14 +61,12 @@ extern struct task_struct *_switch_to(struct task_struct *, struct task_struct *
#define local_irq_restore(x) \
__asm__ __volatile__("mtsm %0" : : "r" (x) : "memory" )
#ifdef CONFIG_SMP
#else
#define cli() local_irq_disable()
#define sti() local_irq_enable()
#define save_flags(x) local_save_flags(x)
#define restore_flags(x) local_irq_restore(x)
#endif
#define irqs_disabled() \
({ \
unsigned long flags; \
local_save_flags(flags); \
(flags & PSW_I) == 0; \
})
#define mfctl(reg) ({ \
unsigned long cr; \
......@@ -120,24 +105,46 @@ static inline void set_eiem(unsigned long val)
: "r" (gr), "i" (cr))
#define mb() __asm__ __volatile__ ("sync" : : :"memory")
#define wmb() mb()
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
/*
** This is simply the barrier() macro from linux/kernel.h, but when serial.c
** uses tqueue.h, which uses smp_mb() defined using barrier(), linux/kernel.h
** hasn't been included yet, so it fails; thus the macro is repeated here.
**
** The PA-RISC architecture allows for weakly ordered memory accesses, although
** none of the processors use it. There is a strong-ordering bit that is
** set in the O-bit of the page directory entry. Operating systems that
** cannot tolerate out-of-order accesses should set this bit when mapping
** pages. The O-bit of the PSW should also be set to 1 (I don't believe any
** of the processors implemented the PSW O-bit). The PCX-W ERS states that
** the TLB O-bit is not implemented so the page directory does not need to
** have the O-bit set when mapping pages (section 3.1). This section also
** states that the PSW Y, Z, G, and O bits are not implemented.
** So it looks like nothing needs to be done for parisc-linux (yet).
** (thanks to chada for the above comment -ggg)
**
** The __asm__ op below simply prevents gcc/ld from reordering
** instructions across the mb() "call".
*/
#define mb() __asm__ __volatile__("":::"memory"); /* barrier() */
#define rmb() mb()
#define wmb() mb()
#define smp_mb() mb()
#define smp_wmb() mb()
#define smp_read_barrier_depends() do { } while(0)
#define read_barrier_depends() do { } while(0)
extern unsigned long __xchg(unsigned long, unsigned long *, int);
#define set_mb(var, value) do { var = value; mb(); } while (0)
#define set_wmb(var, value) do { var = value; wmb(); } while (0)
#define xchg(ptr,x) \
(__typeof__(*(ptr)))__xchg((unsigned long)(x),(unsigned long*)(ptr),sizeof(*(ptr)))
/* LDCW, the only atomic read-write operation PA-RISC has. Sigh. */
/* LDCW, the only atomic read-write operation PA-RISC has. *sigh*. */
#define __ldcw(a) ({ \
unsigned __ret; \
__asm__ __volatile__("ldcw 0(%1),%0" : "=r" (__ret) : "r" (a)); \
__ret; \
})
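/* Editor's note: LDCW ("load and clear word") atomically loads the word at
 * the given address and writes zero back, which is why 1 has to mean
 * unlocked here; the architecture also requires the operand to be 16-byte
 * aligned for the operation to be atomic. */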
#ifdef CONFIG_SMP
/*
* Your basic SMP spinlocks, allowing only a single CPU anywhere
......
......@@ -53,6 +53,8 @@ struct termio {
#define N_PROFIBUS_FDL 10 /* Reserved for Profibus <Dave@mvhi.com> */
#define N_IRDA 11 /* Linux IR - http://irda.sourceforge.net/ */
#define N_SMSBLOCK 12 /* SMS block mode - for talking to GSM data cards about SMS messages */
#define N_HDLC 13 /* synchronous HDLC */
#define N_SYNC_PPP 14
#define N_HCI 15 /* Bluetooth HCI UART */
#ifdef __KERNEL__
......
#ifndef _ASM_PARISC_THREAD_INFO_H
#define _ASM_PARISC_THREAD_INFO_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/processor.h>
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain;/* execution domain */
__u32 flags; /* thread_info flags (see TIF_*) */
__u32 cpu; /* current CPU */
mm_segment_t addr_limit; /* user-level address space limit */
__s32 preempt_count; /* 0=premptable, <0=BUG; will also serve as bh-counter */
};
#define INIT_THREAD_INFO(tsk) \
{ \
task: &tsk, \
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
addr_limit: KERNEL_DS, \
preempt_count: 0, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* thread information allocation */
#define THREAD_ORDER 2
/* Be sure to hunt all references to this down when you change the size of
* the kernel stack */
#define THREAD_SIZE (PAGE_SIZE << THREAD_ORDER)
#define THREAD_SHIFT (PAGE_SHIFT + THREAD_ORDER)
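/* Editor's note: with 4kB pages, THREAD_ORDER 2 makes THREAD_SIZE a 16kB
 * allocation (and THREAD_SHIFT == 14), matching the KERNEL_STACK_SIZE of
 * 4*PAGE_SIZE defined in processor.h earlier in this patch. */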
#define alloc_thread_info() ((struct thread_info *) \
__get_free_pages(GFP_KERNEL, THREAD_ORDER))
#define free_thread_info(ti) free_pages((unsigned long) (ti), THREAD_ORDER)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
/* how to get the thread information struct from C */
#define current_thread_info() ((struct thread_info *)mfctl(30))
#endif /* !__ASSEMBLY */
#define PREEMPT_ACTIVE 0x4000000
/*
* thread information flags
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_POLLING_NRFLAG 4 /* true if poll_idle() is polling TIF_NEED_RESCHED */
#define TIF_32BIT 5 /* 32 bit binary */
#define TIF_WORK_MASK 0x7 /* like TIF_ALLWORK_BITS but sans TIF_SYSCALL_TRACE */
#define TIF_ALLWORK_MASK 0xf /* bits 0..3 are "work to do on user-return" bits */
#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
#define _TIF_POLLING_NRFLAG (1 << TIF_POLLING_NRFLAG)
#define _TIF_32BIT (1 << TIF_32BIT)
#define _TIF_USER_WORK_MASK (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | \
_TIF_NEED_RESCHED)
#endif /* __KERNEL__ */
#endif /* _ASM_PARISC_THREAD_INFO_H */
......@@ -9,6 +9,8 @@
#include <asm/system.h>
#include <linux/time.h>
#define CLOCK_TICK_RATE 1193180 /* Underlying HZ */
typedef unsigned long cycles_t;
extern cycles_t cacheflush_time;
......
#ifndef _PARISC_TLB_H
#define _PARISC_TLB_H
#define tlb_flush(tlb) \
do { if ((tlb)->fullmm) \
flush_tlb_mm((tlb)->mm);\
} while (0)
#define tlb_start_vma(tlb, vma) \
do { if (!(tlb)->fullmm) \
flush_cache_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define tlb_end_vma(tlb, vma) \
do { if (!(tlb)->fullmm) \
flush_tlb_range(vma, vma->vm_start, vma->vm_end); \
} while (0)
#define __tlb_remove_tlb_entry(tlb, pte, address) \
do { } while (0)
#include <asm-generic/tlb.h>
#define __pmd_free_tlb(tlb, pmd) pmd_free(pmd)
#define __pte_free_tlb(tlb, pte) pte_free(pte)
#endif
#ifndef _PARISC_TLBFLUSH_H
#define _PARISC_TLBFLUSH_H
/* TLB flushing routines.... */
#include <linux/mm.h>
#include <asm/mmu_context.h>
extern void flush_tlb_all(void);
/*
* flush_tlb_mm()
*
* XXX This code is NOT valid for HP-UX compatibility processes,
* (although it will probably work 99% of the time). HP-UX
* processes are free to play with the space id's and save them
* over long periods of time, etc. so we have to preserve the
* space and just flush the entire tlb. We need to check the
* personality in order to do that, but the personality is not
* currently being set correctly.
*
* Of course, Linux processes could do the same thing, but
* we don't support that (and the compilers, dynamic linker,
* etc. do not do that).
*/
static inline void flush_tlb_mm(struct mm_struct *mm)
{
if (mm == &init_mm) BUG(); /* Should never happen */
#ifdef CONFIG_SMP
flush_tlb_all();
#else
if (mm) {
if (mm->context != 0)
free_sid(mm->context);
mm->context = alloc_sid();
if (mm == current->active_mm)
load_context(mm->context);
}
#endif
}
extern __inline__ void flush_tlb_pgtables(struct mm_struct *mm, unsigned long start, unsigned long end)
{
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
/* For one page, it's not worth testing the split_tlb variable */
mtsp(vma->vm_mm->context,1);
pdtlb(addr);
pitlb(addr);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
unsigned long npages;
npages = ((end - (start & PAGE_MASK)) + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (npages >= 512) /* XXX arbitrary, should be tuned */
flush_tlb_all();
else {
mtsp(vma->vm_mm->context,1);
if (split_tlb) {
while (npages--) {
pdtlb(start);
pitlb(start);
start += PAGE_SIZE;
}
} else {
while (npages--) {
pdtlb(start);
start += PAGE_SIZE;
}
}
}
}
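/* Editor's worked example for the npages computation above, assuming 4kB
 * pages: start == 0x1234, end == 0x3456 gives
 * ((0x3456 - 0x1000) + 0xfff) >> 12 == 3, i.e. exactly the three pages at
 * 0x1000, 0x2000 and 0x3000 that the range touches. */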
#define flush_tlb_kernel_range(start, end) flush_tlb_all()
#endif
#ifndef __ASM_TRAPS_H
#define __ASM_TRAPS_H
#ifdef __KERNEL__
struct pt_regs;
/* traps.c */
void parisc_terminate(char *msg, struct pt_regs *regs,
int code, unsigned long offset);
/* mm/fault.c */
void do_page_fault(struct pt_regs *regs, unsigned long code,
unsigned long address);
#endif
#endif
......@@ -48,6 +48,7 @@ typedef unsigned long long u64;
/* Dma addresses are 32-bits wide. */
typedef u32 dma_addr_t;
typedef u64 dma64_addr_t;
#endif /* __KERNEL__ */
......
......@@ -18,8 +18,8 @@
#define segment_eq(a,b) ((a).seg == (b).seg)
#define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit)
#define set_fs(x) (current->addr_limit = (x))
#define get_fs() (current_thread_info()->addr_limit)
#define set_fs(x) (current_thread_info()->addr_limit = (x))
/*
* Note that since kernel addresses are in a separate address space on
......@@ -34,6 +34,18 @@
#define put_user __put_user
#define get_user __get_user
#if BITS_PER_LONG == 32
#define LDD_KERNEL(ptr) BUG()
#define LDD_USER(ptr) BUG()
#define STD_KERNEL(x, ptr) __put_kernel_asm64(x,ptr)
#define STD_USER(x, ptr) __put_user_asm64(x,ptr)
#else
#define LDD_KERNEL(ptr) __get_kernel_asm("ldd",ptr)
#define LDD_USER(ptr) __get_user_asm("ldd",ptr)
#define STD_KERNEL(x, ptr) __put_kernel_asm("std",x,ptr)
#define STD_USER(x, ptr) __put_user_asm("std",x,ptr)
#endif
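/* Editor's note: on a narrow kernel (BITS_PER_LONG == 32) there is no
 * 64-bit load, so an 8-byte __get_user()/__get_kernel() is simply a BUG();
 * 8-byte stores are still supported by splitting them into the two 32-bit
 * stw's of __put_kernel_asm64()/__put_user_asm64() defined later in this
 * file. */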
/*
* The exception table contains two values: the first is an address
* for an instruction that is allowed to fault, and the second is
......@@ -46,7 +58,7 @@
struct exception_table_entry {
unsigned long addr; /* address of insn that is allowed to fault. */
int skip; /* pcoq skip | r9 clear flag | r8 -EFAULT flag */
long skip; /* pcoq skip | r9 clear flag | r8 -EFAULT flag */
};
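As the comment spells out, skip packs three things into one word: how far to advance the pcoq past the faulting instruction, plus two low-order flag bits; judging from the +1 and +3 offsets in the asm helpers below, bit 0 appears to request "load -EFAULT into r8" and bit 1 "clear r9". A hedged sketch of how a fault handler might consult the table (the real parisc fixup path differs and lives in the fault-handling code):
/* Illustrative linear walk; a real kernel would keep the table
 * sorted and binary-search it.  Field layout per the comment above.
 */
static const struct exception_table_entry *
toy_search_extable(const struct exception_table_entry *tbl, int n,
		   unsigned long fault_addr)
{
	int i;
	for (i = 0; i < n; i++)
		if (tbl[i].addr == fault_addr)
			return &tbl[i];	/* caller then decodes skip */
	return NULL;			/* no fixup: the fault is fatal */
}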
extern const struct exception_table_entry
......@@ -62,7 +74,7 @@ extern const struct exception_table_entry
case 1: __get_kernel_asm("ldb",ptr); break; \
case 2: __get_kernel_asm("ldh",ptr); break; \
case 4: __get_kernel_asm("ldw",ptr); break; \
case 8: __get_kernel_asm("ldd",ptr); break; \
case 8: LDD_KERNEL(ptr); break; \
default: BUG(); break; \
} \
} \
......@@ -71,7 +83,7 @@ extern const struct exception_table_entry
case 1: __get_user_asm("ldb",ptr); break; \
case 2: __get_user_asm("ldh",ptr); break; \
case 4: __get_user_asm("ldw",ptr); break; \
case 8: __get_user_asm("ldd",ptr); break; \
case 8: LDD_USER(ptr); break; \
default: BUG(); break; \
} \
} \
......@@ -80,6 +92,27 @@ extern const struct exception_table_entry
__gu_err; \
})
#ifdef __LP64__
#define __get_kernel_asm(ldx,ptr) \
__asm__("\n1:\t" ldx "\t0(%2),%0\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t.dword\t1b\n" \
"\t.dword\t(2b-1b)+3\n" \
"\t.previous" \
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err));
#define __get_user_asm(ldx,ptr) \
__asm__("\n1:\t" ldx "\t0(%%sr3,%2),%0\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t.dword\t1b\n" \
"\t.dword\t(2b-1b)+3\n" \
"\t.previous" \
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err));
#else
#define __get_kernel_asm(ldx,ptr) \
__asm__("\n1:\t" ldx "\t0(%2),%0\n" \
"2:\n" \
......@@ -99,7 +132,7 @@ extern const struct exception_table_entry
"\t.previous" \
: "=r"(__gu_val), "=r"(__gu_err) \
: "r"(ptr), "1"(__gu_err));
#endif
#define __put_user(x,ptr) \
({ \
......@@ -110,7 +143,7 @@ extern const struct exception_table_entry
case 1: __put_kernel_asm("stb",x,ptr); break; \
case 2: __put_kernel_asm("sth",x,ptr); break; \
case 4: __put_kernel_asm("stw",x,ptr); break; \
case 8: __put_kernel_asm("std",x,ptr); break; \
case 8: STD_KERNEL(x,ptr); break; \
default: BUG(); break; \
} \
} \
......@@ -119,7 +152,7 @@ extern const struct exception_table_entry
case 1: __put_user_asm("stb",x,ptr); break; \
case 2: __put_user_asm("sth",x,ptr); break; \
case 4: __put_user_asm("stw",x,ptr); break; \
case 8: __put_user_asm("std",x,ptr); break; \
case 8: STD_USER(x,ptr); break; \
default: BUG(); break; \
} \
} \
......@@ -133,6 +166,29 @@ extern const struct exception_table_entry
* gcc knows about, so there are no aliasing issues.
*/
#ifdef __LP64__
#define __put_kernel_asm(stx,x,ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%1)\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t.dword\t1b\n" \
"\t.dword\t(2b-1b)+1\n" \
"\t.previous" \
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err))
#define __put_user_asm(stx,x,ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%%sr3,%1)\n" \
"2:\n" \
"\t.section __ex_table,\"a\"\n" \
"\t.dword\t1b\n" \
"\t.dword\t(2b-1b)+1\n" \
"\t.previous" \
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err))
#else
#define __put_kernel_asm(stx,x,ptr) \
__asm__ __volatile__ ( \
"\n1:\t" stx "\t%2,0(%1)\n" \
......@@ -155,6 +211,44 @@ extern const struct exception_table_entry
: "=r"(__pu_err) \
: "r"(ptr), "r"(x), "0"(__pu_err))
static inline void __put_kernel_asm64(u64 x, void *ptr)
{
u32 hi = x>>32;
u32 lo = x&0xffffffff;
__asm__ __volatile__ (
"\n1:\tstw %1,0(%0)\n"
"\n2:\tstw %2,4(%0)\n"
"3:\n"
"\t.section __ex_table,\"a\"\n"
"\t.word\t1b\n"
"\t.word\t(3b-1b)+1\n"
"\t.word\t2b\n"
"\t.word\t(3b-2b)+1\n"
"\t.previous"
: : "r"(ptr), "r"(hi), "r"(lo));
}
static inline void __put_user_asm64(u64 x, void *ptr)
{
u32 hi = x>>32;
u32 lo = x&0xffffffff;
__asm__ __volatile__ (
"\n1:\tstw %1,0(%%sr3,%0)\n"
"\n2:\tstw %2,4(%%sr3,%0)\n"
"3:\n"
"\t.section __ex_table,\"a\"\n"
"\t.word\t1b\n"
"\t.word\t(3b-1b)+1\n"
"\t.word\t2b\n"
"\t.word\t(3b-2b)+1\n"
"\t.previous"
: : "r"(ptr), "r"(hi), "r"(lo));
}
#endif
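Two details in the 32-bit fallbacks above are worth noting: PA-RISC is big-endian, so the high word lands at offset 0 and the low word at offset 4; and each stw gets its own exception-table entry, so a fault on either half of the store is fixed up independently. The split itself, with an assumed value:
u64 x  = 0x0123456789abcdefULL;
u32 hi = x >> 32;		/* 0x01234567, stored at 0(ptr) */
u32 lo = x & 0xffffffff;	/* 0x89abcdef, stored at 4(ptr) */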
/*
* Complex access routines -- external declarations
......@@ -174,16 +268,11 @@ extern long lstrnlen_user(const char *,long);
#define strnlen_user lstrnlen_user
#define strlen_user(str) lstrnlen_user(str, 0x7fffffffL)
#define clear_user lclear_user
#define __clear_user lclear_user
#define copy_from_user lcopy_from_user
#define __copy_from_user lcopy_from_user
#define copy_to_user lcopy_to_user
#define __copy_to_user lcopy_to_user
#define copy_to_user_ret(to,from,n,retval) \
({ if (lcopy_to_user(to,from,n)) return retval; })
#define copy_from_user_ret(to,from,n,retval) \
({ if (lcopy_from_user(to,from,n)) return retval; })
#endif /* __PARISC_UACCESS_H */
......@@ -17,4 +17,11 @@
memmove((ptr), &__tmp, sizeof(*(ptr))); \
(void)0; })
#endif /* _ASM_PARISC_UNALIGNED_H */
#ifdef __KERNEL__
struct pt_regs;
void handle_unaligned(struct pt_regs *regs);
int check_unaligned(struct pt_regs *regs);
#endif
#endif /* _ASM_PARISC_UNALIGNED_H_ */
......@@ -579,7 +579,7 @@
#define __NR_uselib (__NR_Linux + 86)
#define __NR_swapon (__NR_Linux + 87)
#define __NR_reboot (__NR_Linux + 88)
#define __NR_readdir (__NR_Linux + 89)
#define __NR_mmap2 (__NR_Linux + 89)
#define __NR_mmap (__NR_Linux + 90)
#define __NR_munmap (__NR_Linux + 91)
#define __NR_truncate (__NR_Linux + 92)
......@@ -591,8 +591,8 @@
#define __NR_recv (__NR_Linux + 98)
#define __NR_statfs (__NR_Linux + 99)
#define __NR_fstatfs (__NR_Linux + 100)
#define __NR_ioperm (__NR_Linux + 101)
#define __NR_socketcall (__NR_Linux + 102)
#define __NR_stat64 (__NR_Linux + 101)
/* #define __NR_socketcall (__NR_Linux + 102) */
#define __NR_syslog (__NR_Linux + 103)
#define __NR_setitimer (__NR_Linux + 104)
#define __NR_getitimer (__NR_Linux + 105)
......@@ -602,7 +602,7 @@
#define __NR_pwrite64 (__NR_Linux + 109)
#define __NR_getcwd (__NR_Linux + 110)
#define __NR_vhangup (__NR_Linux + 111)
#define __NR_idle (__NR_Linux + 112)
#define __NR_fstat64 (__NR_Linux + 112)
#define __NR_vfork (__NR_Linux + 113)
#define __NR_wait4 (__NR_Linux + 114)
#define __NR_swapoff (__NR_Linux + 115)
......@@ -689,14 +689,25 @@
#define __NR_getpmsg (__NR_Linux + 196) /* some people actually want streams */
#define __NR_putpmsg (__NR_Linux + 197) /* some people actually want streams */
#define __NR_gettid (__NR_Linux + 198)
#define __NR_tkill (__NR_Linux + 199)
#define __NR_Linux_syscalls 199
#define __NR_lstat64 (__NR_Linux + 198)
#define __NR_truncate64 (__NR_Linux + 199)
#define __NR_ftruncate64 (__NR_Linux + 200)
#define __NR_getdents64 (__NR_Linux + 201)
#define __NR_fcntl64 (__NR_Linux + 202)
#define __NR_attrctl (__NR_Linux + 203)
#define __NR_acl_get (__NR_Linux + 204)
#define __NR_acl_set (__NR_Linux + 205)
#define __NR_gettid (__NR_Linux + 206)
#define __NR_readahead (__NR_Linux + 207)
#define __NR_tkill (__NR_Linux + 208)
#define __NR_Linux_syscalls 208
#define HPUX_GATEWAY_ADDR 0xC0000004
#define LINUX_GATEWAY_ADDR 0x100
#define LINUX_GATEWAY_STR "0x100"
#ifndef __ASSEMBLY__
/* The old syscall code here didn't work, and it looks like it's only used
 * by applications such as fdisk, which for some reason need to produce
......@@ -725,7 +736,7 @@
} \
if (__sys_res >= (unsigned long)-4095) { \
errno = -__sys_res; \
__sys_res == (unsigned long)-1; \
__sys_res = (unsigned long)-1; \
} \
__sys_res; \
})
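The test above implements the standard Linux syscall-return convention: raw results falling in the last 4095 values of the unsigned range are negated errno codes. A worked example with an assumed value (toy_decode is invented; it mirrors the macro body):
#include <errno.h>
static long toy_decode(unsigned long __sys_res)
{
	/* e.g. a raw return of -2 (ENOENT): on 32-bit that is 0xfffffffe,
	 * which is >= (unsigned long)-4095 == 0xfffff001, so...
	 */
	if (__sys_res >= (unsigned long)-4095) {
		errno = -__sys_res;		/* errno = 2 */
		__sys_res = (unsigned long)-1;	/* caller sees -1 */
	}
	return __sys_res;
}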
......@@ -796,7 +807,7 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5) \
}
/* mmap takes 6 arguments */
/* mmap & mmap2 take 6 arguments */
#define _syscall6(type,name,type1,arg1,type2,arg2,type3,arg3,type4,arg4,type5,arg5,type6,arg6) \
type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6) \
......@@ -804,8 +815,11 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, type5 arg5, type6 arg6
return K_INLINE_SYSCALL(name, 6, arg1, arg2, arg3, arg4, arg5, arg6); \
}
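For completeness, this is how userspace (or an early libc) might instantiate the six-argument stub for the newly added mmap2; the prototype here is illustrative, and note that mmap2 takes its file offset in pages rather than bytes:
/* Illustrative instantiation; the real declaration belongs to libc. */
_syscall6(void *, mmap2, void *, addr, size_t, len,
	  int, prot, int, flags, int, fd, long, pgoff)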
#ifdef __KERNEL_SYSCALLS__
#include <asm/current.h>
static inline pid_t setsid(void)
{
extern int sys_setsid(void);
......@@ -836,6 +850,13 @@ static inline int dup(int fd)
return sys_dup(fd);
}
static inline int execve(char *filename, char * argv [],
char * envp[])
{
extern int __execve(char *, char **, char **, struct task_struct *);
return __execve(filename, argv, envp, current);
}
static inline int open(const char *file, int flag, int mode)
{
extern long sys_open(const char *, int, int);
......@@ -844,6 +865,7 @@ static inline int open(const char *file, int flag, int mode)
static inline int close(int fd)
{
extern asmlinkage long sys_close(unsigned int);
return sys_close(fd);
}
......@@ -853,20 +875,17 @@ static inline int _exit(int exitcode)
return sys_exit(exitcode);
}
struct rusage;
extern asmlinkage long sys_wait4(pid_t, unsigned int *, int, struct rusage *);
static inline pid_t waitpid(pid_t pid, int *wait_stat, int options)
{
extern int sys_wait4(int, int *, int, struct rusage *);
return sys_wait4((int)pid, wait_stat, options, NULL);
return sys_wait4(pid, wait_stat, options, NULL);
}
static inline int execve(char *filename, char * argv [],
char * envp[])
{
extern int __execve(char *, char **, char **, struct task_struct *);
return __execve(filename, argv, envp, current);
}
#endif /* __KERNEL_SYSCALLS__ */
#endif
#endif /* __ASSEMBLY__ */
#undef STR
......
#include <asm-generic/xor.h>