Commit e105d28a authored by Martin Schwidefsky's avatar Martin Schwidefsky Committed by Linus Torvalds

[PATCH] s/390 patches for 2.5.20 (2 of 4).

Second patch of the s/390 update. Contains all the include file changes in
include/asm-{s390,s390x}.
parent c1997c8d
......@@ -26,29 +26,17 @@ typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
#define atomic_eieio() __asm__ __volatile__ ("BCR 15,0")
#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string) \
__asm__ __volatile__(" l %0,0(%2)\n" \
__asm__ __volatile__(" l %0,0(%3)\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,0(%2)\n" \
op_string " %1,%4\n" \
" cs %0,%1,0(%3)\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val) \
: "=&d" (old_val), "=&d" (new_val), \
"+m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val) : "cc" );
static __inline__ int atomic_read(atomic_t *v)
{
int retval;
__asm__ __volatile__("bcr 15,0\n\t"
"l %0,%1"
: "=d" (retval) : "m" (*v) );
return retval;
}
static __inline__ void atomic_set(atomic_t *v, int i)
{
__asm__ __volatile__("st %1,%0\n\t"
"bcr 15,0"
: "=m" (*v) : "d" (i) );
}
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
static __inline__ void atomic_add(int i, atomic_t *v)
{
......@@ -138,14 +126,14 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
int retval;
__asm__ __volatile__(
" lr 0,%2\n"
" cs 0,%3,0(%1)\n"
" lr %0,%3\n"
" cs %0,%4,0(%2)\n"
" ipm %0\n"
" srl %0,28\n"
"0:"
: "=&d" (retval)
: "=&d" (retval), "+m" (v->counter)
: "a" (v), "d" (expected_oldval) , "d" (new_val)
: "0", "cc");
: "cc" );
return retval;
}
......@@ -155,12 +143,14 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__(
"0: lr 0,%1\n"
" cs 0,%2,0(%0)\n"
"0: lr %1,%3\n"
" cs %1,%4,0(%2)\n"
" jl 0b\n"
: : "a" (v), "d" (expected_oldval) , "d" (new_val)
: "cc", "0" );
: "+m" (v->counter), "=&d" (tmp)
: "a" (v), "d" (expected_oldval) , "d" (new_val)
: "cc" );
}
#define atomic_compare_and_swap_debug(where,from,to) \
......
......@@ -47,263 +47,215 @@
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];
#ifdef CONFIG_SMP
/*
* SMP save set_bit routine based on compare and swap (CS)
*/
static __inline__ void set_bit_cs(int nr, volatile void * addr)
static inline void set_bit_cs(int nr, volatile void *ptr)
{
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lhi 1,3\n" /* CS must be aligned on 4 byte b. */
" nr 1,%1\n" /* isolate last 2 bits of address */
" xr %1,1\n" /* make addr % 4 == 0 */
" sll 1,3\n"
" ar %0,1\n" /* add alignement to bitnr */
addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
#endif
" lhi 1,31\n"
" nr 1,%0\n" /* make shift value */
" xr %0,1\n"
" srl %0,3\n"
" lhi 2,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" sll 2,0(1)\n" /* make OR mask */
" l %0,0(%1)\n"
"0: lr 1,%0\n" /* CS loop starts here */
" or 1,2\n" /* set bit */
" cs %0,1,0(%1)\n"
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 31); /* make OR mask */
asm volatile(
" l %0,0(%4)\n"
"0: lr %1,%0\n"
" or %1,%3\n"
" cs %0,%1,0(%4)\n"
" jl 0b"
: "+a" (nr), "+a" (addr) :
: "cc", "memory", "1", "2" );
: "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
: "d" (mask), "a" (addr)
: "cc" );
}
/*
* SMP save clear_bit routine based on compare and swap (CS)
*/
static __inline__ void clear_bit_cs(int nr, volatile void * addr)
static inline void clear_bit_cs(int nr, volatile void *ptr)
{
static const int mask = -1;
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lhi 1,3\n" /* CS must be aligned on 4 byte b. */
" nr 1,%1\n" /* isolate last 2 bits of address */
" xr %1,1\n" /* make addr % 4 == 0 */
" sll 1,3\n"
" ar %0,1\n" /* add alignement to bitnr */
addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
#endif
" lhi 1,31\n"
" nr 1,%0\n" /* make shift value */
" xr %0,1\n"
" srl %0,3\n"
" lhi 2,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" sll 2,0(1)\n"
" x 2,%2\n" /* make AND mask */
" l %0,0(%1)\n"
"0: lr 1,%0\n" /* CS loop starts here */
" nr 1,2\n" /* clear bit */
" cs %0,1,0(%1)\n"
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = ~(1UL << (nr & 31)); /* make AND mask */
asm volatile(
" l %0,0(%4)\n"
"0: lr %1,%0\n"
" nr %1,%3\n"
" cs %0,%1,0(%4)\n"
" jl 0b"
: "+a" (nr), "+a" (addr) : "m" (mask)
: "cc", "memory", "1", "2" );
: "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
: "d" (mask), "a" (addr)
: "cc" );
}
/*
* SMP save change_bit routine based on compare and swap (CS)
*/
static __inline__ void change_bit_cs(int nr, volatile void * addr)
static inline void change_bit_cs(int nr, volatile void *ptr)
{
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lhi 1,3\n" /* CS must be aligned on 4 byte b. */
" nr 1,%1\n" /* isolate last 2 bits of address */
" xr %1,1\n" /* make addr % 4 == 0 */
" sll 1,3\n"
" ar %0,1\n" /* add alignement to bitnr */
addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
#endif
" lhi 1,31\n"
" nr 1,%0\n" /* make shift value */
" xr %0,1\n"
" srl %0,3\n"
" lhi 2,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" sll 2,0(1)\n" /* make XR mask */
" l %0,0(%1)\n"
"0: lr 1,%0\n" /* CS loop starts here */
" xr 1,2\n" /* change bit */
" cs %0,1,0(%1)\n"
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 31); /* make XOR mask */
asm volatile(
" l %0,0(%4)\n"
"0: lr %1,%0\n"
" xr %1,%3\n"
" cs %0,%1,0(%4)\n"
" jl 0b"
: "+a" (nr), "+a" (addr) :
: "cc", "memory", "1", "2" );
: "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
: "d" (mask), "a" (addr)
: "cc" );
}
/*
* SMP save test_and_set_bit routine based on compare and swap (CS)
*/
static __inline__ int test_and_set_bit_cs(int nr, volatile void * addr)
static inline int test_and_set_bit_cs(int nr, volatile void *ptr)
{
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lhi 1,3\n" /* CS must be aligned on 4 byte b. */
" nr 1,%1\n" /* isolate last 2 bits of address */
" xr %1,1\n" /* make addr % 4 == 0 */
" sll 1,3\n"
" ar %0,1\n" /* add alignement to bitnr */
addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
#endif
" lhi 1,31\n"
" nr 1,%0\n" /* make shift value */
" xr %0,1\n"
" srl %0,3\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" lhi 2,1\n"
" sll 2,0(1)\n" /* make OR mask */
" l %0,0(%1)\n"
"0: lr 1,%0\n" /* CS loop starts here */
" or 1,2\n" /* set bit */
" cs %0,1,0(%1)\n"
" jl 0b\n"
" nr %0,2\n" /* isolate old bit */
: "+a" (nr), "+a" (addr) :
: "cc", "memory", "1", "2" );
return nr;
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 31); /* make OR/test mask */
asm volatile(
" l %0,0(%4)\n"
"0: lr %1,%0\n"
" or %1,%3\n"
" cs %0,%1,0(%4)\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
: "d" (mask), "a" (addr)
: "cc" );
return (old & mask) != 0;
}
/*
* SMP save test_and_clear_bit routine based on compare and swap (CS)
*/
static __inline__ int test_and_clear_bit_cs(int nr, volatile void * addr)
static inline int test_and_clear_bit_cs(int nr, volatile void *ptr)
{
static const int mask = -1;
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lhi 1,3\n" /* CS must be aligned on 4 byte b. */
" nr 1,%1\n" /* isolate last 2 bits of address */
" xr %1,1\n" /* make addr % 4 == 0 */
" sll 1,3\n"
" ar %0,1\n" /* add alignement to bitnr */
addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
#endif
" lhi 1,31\n"
" nr 1,%0\n" /* make shift value */
" xr %0,1\n"
" srl %0,3\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" lhi 2,1\n"
" sll 2,0(1)\n"
" x 2,%2\n" /* make AND mask */
" l %0,0(%1)\n"
"0: lr 1,%0\n" /* CS loop starts here */
" nr 1,2\n" /* clear bit */
" cs %0,1,0(%1)\n"
" jl 0b\n"
" x 2,%2\n"
" nr %0,2\n" /* isolate old bit */
: "+a" (nr), "+a" (addr) : "m" (mask)
: "cc", "memory", "1", "2" );
return nr;
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = ~(1UL << (nr & 31)); /* make AND mask */
asm volatile(
" l %0,0(%4)\n"
"0: lr %1,%0\n"
" nr %1,%3\n"
" cs %0,%1,0(%4)\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
: "d" (mask), "a" (addr)
: "cc" );
return (old ^ new) != 0;
}
/*
* SMP save test_and_change_bit routine based on compare and swap (CS)
*/
static __inline__ int test_and_change_bit_cs(int nr, volatile void * addr)
static inline int test_and_change_bit_cs(int nr, volatile void *ptr)
{
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lhi 1,3\n" /* CS must be aligned on 4 byte b. */
" nr 1,%1\n" /* isolate last 2 bits of address */
" xr %1,1\n" /* make addr % 4 == 0 */
" sll 1,3\n"
" ar %0,1\n" /* add alignement to bitnr */
addr ^= addr & 3; /* align address to 4 */
nr += (addr & 3) << 3; /* add alignment to bit number */
#endif
" lhi 1,31\n"
" nr 1,%0\n" /* make shift value */
" xr %0,1\n"
" srl %0,3\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" lhi 2,1\n"
" sll 2,0(1)\n" /* make OR mask */
" l %0,0(%1)\n"
"0: lr 1,%0\n" /* CS loop starts here */
" xr 1,2\n" /* change bit */
" cs %0,1,0(%1)\n"
" jl 0b\n"
" nr %0,2\n" /* isolate old bit */
: "+a" (nr), "+a" (addr) :
: "cc", "memory", "1", "2" );
return nr;
addr += (nr ^ (nr & 31)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 31); /* make XOR mask */
asm volatile(
" l %0,0(%4)\n"
"0: lr %1,%0\n"
" xr %1,%3\n"
" cs %0,%1,0(%4)\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "+m" (*(unsigned int *) addr)
: "d" (mask), "a" (addr)
: "cc" );
return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
/*
* fast, non-SMP set_bit routine
*/
static __inline__ void __set_bit(int nr, volatile void * addr)
static inline void __set_bit(int nr, volatile void *ptr)
{
__asm__ __volatile__(
" lhi 2,24\n"
" lhi 1,7\n"
" xr 2,%0\n"
" nr 1,%0\n"
" srl 2,3\n"
" la 2,0(2,%1)\n"
" la 1,0(1,%2)\n"
" oc 0(1,2),0(1)"
: : "r" (nr), "a" (addr), "a" (&_oi_bitmap)
: "cc", "memory", "1", "2" );
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
asm volatile("oc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7))
: "cc" );
}
static __inline__ void
__constant_set_bit(const int nr, volatile void * addr)
static inline void
__constant_set_bit(const int nr, volatile void *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr >> 3) ^ 3);
switch (nr&7) {
case 0:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x01"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory");
asm volatile ("oi 0(%1),0x01"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 1:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x02"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x02"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 2:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x04"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x04"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 3:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x08"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x08"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 4:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x10"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x10"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 5:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x20"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x20"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 6:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x40"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x40"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 7:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x80"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x80"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
}
}
......@@ -316,73 +268,56 @@ __constant_set_bit(const int nr, volatile void * addr)
/*
* fast, non-SMP clear_bit routine
*/
static __inline__ void
__clear_bit(int nr, volatile void * addr)
static inline void
__clear_bit(int nr, volatile void *ptr)
{
__asm__ __volatile__(
" lhi 2,24\n"
" lhi 1,7\n"
" xr 2,%0\n"
" nr 1,%0\n"
" srl 2,3\n"
" la 2,0(2,%1)\n"
" la 1,0(1,%2)\n"
" nc 0(1,2),0(1)"
: : "r" (nr), "a" (addr), "a" (&_ni_bitmap)
: "cc", "memory", "1", "2" );
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
asm volatile("nc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_ni_bitmap + (nr & 7))
: "cc" );
}
static __inline__ void
__constant_clear_bit(const int nr, volatile void * addr)
static inline void
__constant_clear_bit(const int nr, volatile void *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr >> 3) ^ 3);
switch (nr&7) {
case 0:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xFE"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xFE"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 1:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xFD"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xFD"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 2:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xFB"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xFB"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 3:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xF7"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xF7"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 4:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xEF"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "cc", "memory" );
asm volatile ("ni 0(%1),0xEF"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 5:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xDF"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xDF"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 6:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xBF"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xBF"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 7:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0x7F"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0x7F"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
}
}
......@@ -395,72 +330,55 @@ __constant_clear_bit(const int nr, volatile void * addr)
/*
* fast, non-SMP change_bit routine
*/
static __inline__ void __change_bit(int nr, volatile void * addr)
static inline void __change_bit(int nr, volatile void *ptr)
{
__asm__ __volatile__(
" lhi 2,24\n"
" lhi 1,7\n"
" xr 2,%0\n"
" nr 1,%0\n"
" srl 2,3\n"
" la 2,0(2,%1)\n"
" la 1,0(1,%2)\n"
" xc 0(1,2),0(1)"
: : "r" (nr), "a" (addr), "a" (&_oi_bitmap)
: "cc", "memory", "1", "2" );
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
asm volatile("xc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7))
: "cc" );
}
static __inline__ void
__constant_change_bit(const int nr, volatile void * addr)
static inline void
__constant_change_bit(const int nr, volatile void *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr >> 3) ^ 3);
switch (nr&7) {
case 0:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x01"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x01"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 1:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x02"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x02"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 2:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x04"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x04"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 3:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x08"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x08"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 4:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x10"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x10"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 5:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x20"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("xi 0(%1),0x20"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 6:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x40"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("xi 0(%1),0x40"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 7:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x80"
: "=m" (*((volatile char *) addr + ((nr>>3)^3)))
: : "1", "cc", "memory" );
asm volatile ("xi 0(%1),0x80"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
}
}
......@@ -473,80 +391,54 @@ __constant_change_bit(const int nr, volatile void * addr)
/*
* fast, non-SMP test_and_set_bit routine
*/
static __inline__ int test_and_set_bit_simple(int nr, volatile void * addr)
static inline int test_and_set_bit_simple(int nr, volatile void *ptr)
{
static const int mask = 1;
int oldbit;
__asm__ __volatile__(
" lhi 1,24\n"
" lhi 2,7\n"
" xr 1,%1\n"
" nr 2,1\n"
" srl 1,3\n"
" la 1,0(1,%2)\n"
" ic %0,0(1)\n"
" srl %0,0(2)\n"
" n %0,%4\n"
" la 2,0(2,%3)\n"
" oc 0(1,1),0(2)"
: "=d&" (oldbit) : "r" (nr), "a" (addr),
"a" (&_oi_bitmap), "m" (mask)
: "cc", "memory", "1", "2" );
return oldbit;
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
ch = *(unsigned char *) addr;
asm volatile("oc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7))
: "cc" );
return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
/*
* fast, non-SMP test_and_clear_bit routine
*/
static __inline__ int test_and_clear_bit_simple(int nr, volatile void * addr)
static inline int test_and_clear_bit_simple(int nr, volatile void *ptr)
{
static const int mask = 1;
int oldbit;
__asm__ __volatile__(
" lhi 1,24\n"
" lhi 2,7\n"
" xr 1,%1\n"
" nr 2,1\n"
" srl 1,3\n"
" la 1,0(1,%2)\n"
" ic %0,0(1)\n"
" srl %0,0(2)\n"
" n %0,%4\n"
" la 2,0(2,%3)\n"
" nc 0(1,1),0(2)"
: "=d&" (oldbit) : "r" (nr), "a" (addr),
"a" (&_ni_bitmap), "m" (mask)
: "cc", "memory", "1", "2" );
return oldbit;
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
ch = *(unsigned char *) addr;
asm volatile("nc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_ni_bitmap + (nr & 7))
: "cc" );
return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
/*
* fast, non-SMP test_and_change_bit routine
*/
static __inline__ int test_and_change_bit_simple(int nr, volatile void * addr)
static inline int test_and_change_bit_simple(int nr, volatile void *ptr)
{
static const int mask = 1;
int oldbit;
__asm__ __volatile__(
" lhi 1,24\n"
" lhi 2,7\n"
" xr 1,%1\n"
" nr 2,1\n"
" srl 1,3\n"
" la 1,0(1,%2)\n"
" ic %0,0(1)\n"
" srl %0,0(2)\n"
" n %0,%4\n"
" la 2,0(2,%3)\n"
" xc 0(1,1),0(2)"
: "=d&" (oldbit) : "r" (nr), "a" (addr),
"a" (&_oi_bitmap), "m" (mask)
: "cc", "memory", "1", "2" );
return oldbit;
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
ch = *(unsigned char *) addr;
asm volatile("xc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7))
: "cc" );
return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
......@@ -571,27 +463,17 @@ static __inline__ int test_and_change_bit_simple(int nr, volatile void * addr)
* This routine doesn't need to be atomic.
*/
static __inline__ int __test_bit(int nr, volatile void * addr)
static inline int __test_bit(int nr, volatile void *ptr)
{
static const int mask = 1;
int oldbit;
__asm__ __volatile__(
" lhi 2,24\n"
" lhi 1,7\n"
" xr 2,%1\n"
" nr 1,%1\n"
" srl 2,3\n"
" ic %0,0(2,%2)\n"
" srl %0,0(1)\n"
" n %0,%3"
: "=d&" (oldbit) : "r" (nr), "a" (addr),
"m" (mask)
: "cc", "1", "2" );
return oldbit;
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ 24) >> 3);
ch = *(unsigned char *) addr;
return (ch >> (nr & 7)) & 1;
}
static __inline__ int __constant_test_bit(int nr, volatile void * addr) {
static inline int __constant_test_bit(int nr, volatile void * addr) {
return (((volatile char *) addr)[(nr>>3)^3] & (1<<(nr&7))) != 0;
}
......@@ -603,50 +485,86 @@ static __inline__ int __constant_test_bit(int nr, volatile void * addr) {
/*
* Find-bit routines..
*/
static __inline__ int find_first_zero_bit(void * addr, unsigned size)
static inline int find_first_zero_bit(void * addr, unsigned size)
{
static const int mask = 0xffL;
unsigned long cmp, count;
int res;
if (!size)
return 0;
__asm__(" lhi 0,-1\n"
" lr 1,%1\n"
" ahi 1,31\n"
" srl 1,5\n"
" sr 2,2\n"
"0: c 0,0(2,%2)\n"
__asm__(" lhi %1,-1\n"
" lr %2,%3\n"
" slr %0,%0\n"
" ahi %2,31\n"
" srl %2,5\n"
"0: c %1,0(%0,%4)\n"
" jne 1f\n"
" ahi 2,4\n"
" brct 1,0b\n"
" lr 2,%1\n"
" ahi %0,4\n"
" brct %2,0b\n"
" lr %0,%3\n"
" j 4f\n"
"1: l 1,0(2,%2)\n"
" sll 2,3\n"
" tml 1,0xFFFF\n"
"1: l %2,0(%0,%4)\n"
" sll %0,3\n"
" lhi %1,0xff\n"
" tml %2,0xffff\n"
" jno 2f\n"
" ahi 2,16\n"
" srl 1,16\n"
"2: tml 1,0x00FF\n"
" ahi %0,16\n"
" srl %2,16\n"
"2: tml %2,0x00ff\n"
" jno 3f\n"
" ahi 2,8\n"
" srl 1,8\n"
"3: n 1,%3\n"
" ic 1,0(1,%4)\n"
" n 1,%3\n"
" ar 2,1\n"
"4: lr %0,2"
: "=d" (res) : "a" (size), "a" (addr),
"m" (mask), "a" (&_zb_findmap)
: "cc", "0", "1", "2" );
" ahi %0,8\n"
" srl %2,8\n"
"3: nr %2,%1\n"
" ic %2,0(%2,%5)\n"
" alr %0,%2\n"
"4:"
: "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (addr), "a" (&_zb_findmap) : "cc" );
return (res < size) ? res : size;
}
static inline int find_first_bit(void * addr, unsigned size)
{
unsigned long cmp, count;
int res;
if (!size)
return 0;
__asm__(" slr %1,%1\n"
" lr %2,%3\n"
" slr %0,%0\n"
" ahi %2,31\n"
" srl %2,5\n"
"0: c %1,0(%0,%4)\n"
" jne 1f\n"
" ahi %0,4\n"
" brct %2,0b\n"
" lr %0,%3\n"
" j 4f\n"
"1: l %2,0(%0,%4)\n"
" sll %0,3\n"
" lhi %1,0xff\n"
" tml %2,0xffff\n"
" jnz 2f\n"
" ahi %0,16\n"
" srl %2,16\n"
"2: tml %2,0x00ff\n"
" jnz 3f\n"
" ahi %0,8\n"
" srl %2,8\n"
"3: nr %2,%1\n"
" ic %2,0(%2,%5)\n"
" alr %0,%2\n"
"4:"
: "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (addr), "a" (&_sb_findmap) : "cc" );
return (res < size) ? res : size;
}
static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
static inline int find_next_zero_bit (void * addr, int size, int offset)
{
static const int mask = 0xffL;
unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
unsigned long bitvec;
unsigned long bitvec, reg;
int set, bit = offset & 31, res;
if (bit) {
......@@ -654,23 +572,21 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
* Look for zero in first word
*/
bitvec = (*p) >> bit;
__asm__(" lr 1,%1\n"
" sr %0,%0\n"
" tml 1,0xFFFF\n"
__asm__(" slr %0,%0\n"
" lhi %2,0xff\n"
" tml %1,0xffff\n"
" jno 0f\n"
" ahi %0,16\n"
" srl 1,16\n"
"0: tml 1,0x00FF\n"
" srl %1,16\n"
"0: tml %1,0x00ff\n"
" jno 1f\n"
" ahi %0,8\n"
" srl 1,8\n"
"1: n 1,%2\n"
" ic 1,0(1,%3)\n"
" n 1,%2\n"
" ar %0,1"
: "=d&" (set) : "d" (bitvec),
"m" (mask), "a" (&_zb_findmap)
: "cc", "1" );
" srl %1,8\n"
"1: nr %1,%2\n"
" ic %1,0(%1,%3)\n"
" alr %0,%1"
: "=&d" (set), "+a" (bitvec), "=&d" (reg)
: "a" (&_zb_findmap) : "cc" );
if (set < (32 - bit))
return set + offset;
offset += 32 - bit;
......@@ -683,72 +599,175 @@ static __inline__ int find_next_zero_bit (void * addr, int size, int offset)
return (offset + res);
}
static inline int find_next_bit (void * addr, int size, int offset)
{
unsigned long * p = ((unsigned long *) addr) + (offset >> 5);
unsigned long bitvec, reg;
int set, bit = offset & 31, res;
if (bit) {
/*
* Look for set bit in first word
*/
bitvec = (*p) >> bit;
__asm__(" slr %0,%0\n"
" lhi %2,0xff\n"
" tml %1,0xffff\n"
" jnz 0f\n"
" ahi %0,16\n"
" srl %1,16\n"
"0: tml %1,0x00ff\n"
" jnz 1f\n"
" ahi %0,8\n"
" srl %1,8\n"
"1: nr %1,%2\n"
" ic %1,0(%1,%3)\n"
" alr %0,%1"
: "=&d" (set), "+a" (bitvec), "=&d" (reg)
: "a" (&_sb_findmap) : "cc" );
if (set < (32 - bit))
return set + offset;
offset += 32 - bit;
p++;
}
/*
* No set bit yet, search remaining full words for a bit
*/
res = find_first_bit (p, size - 32 * (p - (unsigned long *) addr));
return (offset + res);
}
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
static __inline__ unsigned long ffz(unsigned long word)
static inline unsigned long ffz(unsigned long word)
{
static const int mask = 0xffL;
unsigned long reg;
int result;
__asm__(" lr 1,%1\n"
" sr %0,%0\n"
" tml 1,0xFFFF\n"
__asm__(" slr %0,%0\n"
" lhi %2,0xff\n"
" tml %1,0xffff\n"
" jno 0f\n"
" ahi %0,16\n"
" srl 1,16\n"
"0: tml 1,0x00FF\n"
" srl %1,16\n"
"0: tml %1,0x00ff\n"
" jno 1f\n"
" ahi %0,8\n"
" srl 1,8\n"
"1: n 1,%2\n"
" ic 1,0(1,%3)\n"
" n 1,%2\n"
" ar %0,1"
: "=d&" (result) : "d" (word),
"m" (mask), "a" (&_zb_findmap)
: "cc", "1" );
" srl %1,8\n"
"1: nr %1,%2\n"
" ic %1,0(%1,%3)\n"
" alr %0,%1"
: "=&d" (result), "+a" (word), "=&d" (reg)
: "a" (&_zb_findmap) : "cc" );
return result;
}
/*
* __ffs = find first bit in word. Undefined if no bit exists,
* so code should check against 0UL first..
*/
static inline unsigned long __ffs(unsigned long word)
{
unsigned long reg, result;
__asm__(" slr %0,%0\n"
" lhi %2,0xff\n"
" tml %1,0xffff\n"
" jnz 0f\n"
" ahi %0,16\n"
" srl %1,16\n"
"0: tml %1,0x00ff\n"
" jnz 1f\n"
" ahi %0,8\n"
" srl %1,8\n"
"1: nr %1,%2\n"
" ic %1,0(%1,%3)\n"
" alr %0,%1"
: "=&d" (result), "+a" (word), "=&d" (reg)
: "a" (&_sb_findmap) : "cc" );
return result;
}
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
static inline int sched_find_first_bit(unsigned long *b)
{
return find_first_bit(b, 140);
}
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
extern int __inline__ ffs (int x)
extern int inline ffs (int x)
{
int r;
int r = 1;
if (x == 0)
return 0;
__asm__(" lr %%r1,%1\n"
" sr %0,%0\n"
" tml %%r1,0xFFFF\n"
__asm__(" tml %1,0xffff\n"
" jnz 0f\n"
" srl %1,16\n"
" ahi %0,16\n"
" srl %%r1,16\n"
"0: tml %%r1,0x00FF\n"
"0: tml %1,0x00ff\n"
" jnz 1f\n"
" srl %1,8\n"
" ahi %0,8\n"
" srl %%r1,8\n"
"1: tml %%r1,0x000F\n"
"1: tml %1,0x000f\n"
" jnz 2f\n"
" srl %1,4\n"
" ahi %0,4\n"
" srl %%r1,4\n"
"2: tml %%r1,0x0003\n"
"2: tml %1,0x0003\n"
" jnz 3f\n"
" srl %1,2\n"
" ahi %0,2\n"
" srl %%r1,2\n"
"3: tml %%r1,0x0001\n"
"3: tml %1,0x0001\n"
" jnz 4f\n"
" ahi %0,1\n"
"4:"
: "=&d" (r) : "d" (x) : "cc", "1" );
return r+1;
: "=&d" (r), "+d" (x) : : "cc" );
return r;
}
/*
* fls: find last bit set.
*/
extern __inline__ int fls(int x)
{
int r = 32;
if (x == 0)
return 0;
__asm__(" tmh %1,0xffff\n"
" jz 0f\n"
" sll %1,16\n"
" ahi %0,-16\n"
"0: tmh %1,0xff00\n"
" jz 1f\n"
" sll %1,8\n"
" ahi %0,-8\n"
"1: tmh %1,0xf000\n"
" jz 2f\n"
" sll %1,4\n"
" ahi %0,-4\n"
"2: tmh %1,0xc000\n"
" jz 3f\n"
" sll %1,2\n"
" ahi %0,-2\n"
"3: tmh %1,0x8000\n"
" jz 4f\n"
" ahi %0,-1\n"
"4:"
: "+d" (r), "+d" (x) : : "cc" );
return r;
}
/*
......@@ -776,51 +795,51 @@ extern int __inline__ ffs (int x)
#define ext2_set_bit(nr, addr) test_and_set_bit((nr)^24, addr)
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr)^24, addr)
#define ext2_test_bit(nr, addr) test_bit((nr)^24, addr)
static __inline__ int ext2_find_first_zero_bit(void *vaddr, unsigned size)
static inline int ext2_find_first_zero_bit(void *vaddr, unsigned size)
{
unsigned long cmp, count;
int res;
if (!size)
return 0;
__asm__(" lhi 0,-1\n"
" lr 1,%1\n"
" ahi 1,31\n"
" srl 1,5\n"
" sr 2,2\n"
"0: c 0,0(2,%2)\n"
__asm__(" lhi %1,-1\n"
" lr %2,%3\n"
" ahi %2,31\n"
" srl %2,5\n"
" slr %0,%0\n"
"0: cl %1,0(%0,%4)\n"
" jne 1f\n"
" ahi 2,4\n"
" brct 1,0b\n"
" lr 2,%1\n"
" ahi %0,4\n"
" brct %2,0b\n"
" lr %0,%3\n"
" j 4f\n"
"1: l 1,0(2,%2)\n"
" sll 2,3\n"
" lhi 0,0xff\n"
" ahi 2,24\n"
" tmh 1,0xFFFF\n"
"1: l %2,0(%0,%4)\n"
" sll %0,3\n"
" ahi %0,24\n"
" lhi %1,0xff\n"
" tmh %2,0xffff\n"
" jo 2f\n"
" ahi 2,-16\n"
" srl 1,16\n"
"2: tml 1,0xFF00\n"
" ahi %0,-16\n"
" srl %2,16\n"
"2: tml %2,0xff00\n"
" jo 3f\n"
" ahi 2,-8\n"
" srl 1,8\n"
"3: nr 1,0\n"
" ic 1,0(1,%3)\n"
" ar 2,1\n"
"4: lr %0,2"
: "=d" (res) : "a" (size), "a" (vaddr),
"a" (&_zb_findmap)
: "cc", "0", "1", "2" );
" ahi %0,-8\n"
" srl %2,8\n"
"3: nr %2,%1\n"
" ic %2,0(%2,%5)\n"
" alr %0,%2\n"
"4:"
: "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (vaddr), "a" (&_zb_findmap) : "cc" );
return (res < size) ? res : size;
}
static __inline__ int
static inline int
ext2_find_next_zero_bit(void *vaddr, unsigned size, unsigned offset)
{
unsigned long *addr = vaddr;
unsigned long *p = addr + (offset >> 5);
unsigned long word;
unsigned long word, reg;
int bit = offset & 31UL, res;
if (offset >= size)
......@@ -835,7 +854,7 @@ ext2_find_next_zero_bit(void *vaddr, unsigned size, unsigned offset)
word >>= bit;
res = bit;
/* Look for zero in first longword */
__asm__(" lhi 0,0xff\n"
__asm__(" lhi %2,0xff\n"
" tml %1,0xffff\n"
" jno 0f\n"
" ahi %0,16\n"
......@@ -844,12 +863,11 @@ ext2_find_next_zero_bit(void *vaddr, unsigned size, unsigned offset)
" jno 1f\n"
" ahi %0,8\n"
" srl %1,8\n"
"1: nr %1,0\n"
" ic %1,0(%1,%2)\n"
"1: nr %1,%2\n"
" ic %1,0(%1,%3)\n"
" alr %0,%1"
: "+&d" (res), "+&a" (word)
: "a" (&_zb_findmap)
: "cc", "0" );
: "+&d" (res), "+&a" (word), "=&d" (reg)
: "a" (&_zb_findmap) : "cc" );
if (res < 32)
return (p - addr)*32 + res;
p++;
......
......@@ -13,20 +13,6 @@
#ifdef __GNUC__
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
__u32 temp;
__asm__ __volatile__ (
" st %0,0(%1)\n"
" icm %0,8,3(%1)\n"
" icm %0,4,2(%1)\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)"
: "+&d" (x) : "a" (&temp) : "cc" );
return x;
}
static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
{
__u32 result;
......@@ -40,27 +26,14 @@ static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
return result;
}
static __inline__ void ___arch__swab32s(__u32 *x)
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
__asm__ __volatile__ (
" icm 0,8,3(%0)\n"
" icm 0,4,2(%0)\n"
" icm 0,2,1(%0)\n"
" ic 0,0(%0)\n"
" st 0,0(%0)"
: : "a" (x) : "0", "memory", "cc");
return ___arch__swab32p(&x);
}
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
static __inline__ void ___arch__swab32s(__u32 *x)
{
__u16 temp;
__asm__ __volatile__ (
" sth %0,0(%1)\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)\n"
: "+&d" (x) : "a" (&temp) : "memory", "cc" );
return x;
*x = ___arch__swab32p(x);
}
static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
......@@ -68,20 +41,20 @@ static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
__u16 result;
__asm__ __volatile__ (
" sr %0,%0\n"
" icm %0,2,1(%1)\n"
" ic %0,0(%1)\n"
: "=&d" (result) : "a" (x) : "cc" );
return result;
}
/* Byte-swap a 16-bit value: run the pointer variant on the by-value copy. */
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
{
return ___arch__swab16p(&x);
}
static __inline__ void ___arch__swab16s(__u16 *x)
{
__asm__ __volatile__(
" icm 0,2,1(%0)\n"
" ic 0,0(%0)\n"
" sth 0,0(%0)"
: : "a" (x) : "0", "memory", "cc" );
*x = ___arch__swab16p(x);
}
#define __arch__swab32(x) ___arch__swab32(x)
......
#ifndef _S390_CACHEFLUSH_H
#define _S390_CACHEFLUSH_H
/* Keep includes the same across arches. */
#include <linux/mm.h>
/* Caches aren't brain-dead on the s390. */
/*
 * All cache-maintenance hooks expand to empty statements: per the note
 * above, s390 needs no explicit cache flushing, so these stubs exist
 * only to satisfy the arch-independent callers of this interface.
 */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
#endif /* _S390_CACHEFLUSH_H */
/*
* File...........: linux/include/asm-s390/ccwcache.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
*/
#ifndef CCWCACHE_H
#define CCWCACHE_H
#include <linux/slab.h>
#include <asm/irq.h>
#ifndef __KERNEL__
#define kmem_cache_t void
#endif /* __KERNEL__ */
/*
 * ccw_req_t - channel-program request as handled by the ccwcache.
 *
 * Carries the channel program itself (cpaddr/cplength), queueing links,
 * retry/ERP bookkeeping and TOD-clock profiling stamps.  Requests come
 * from one of the slab caches managed below (see 'cache' and
 * ccw_alloc_request()).
 *
 * Fixes vs. previous revision: removed the stray ';' after the
 * 'function' member (an extra empty struct-member declaration, not
 * valid ISO C) and corrected the 'expires' comment typo.
 */
typedef struct ccw_req_t {
	/* eye catcher plus queueing information */
	unsigned int magic;
	struct ccw_req_t *next;		/* pointer to next ccw_req_t in queue */
	struct ccw_req_t *int_next;	/* for internal queueing */
	struct ccw_req_t *int_prev;	/* for internal queueing */
	/* Where to execute what... */
	void *device;		/* index of the device the req is for */
	void *req;		/* pointer to originating request */
	ccw1_t *cpaddr;		/* address of channel program */
	char status;		/* one of the CQR_STATUS_* values below */
	char flags;		/* CQR_FLAGS_* bits, see below */
	short retries;		/* A retry counter to be set when filling */
	/* ... and how */
	int options;		/* options for execution */
	char lpm;		/* logical path mask */
	void *data;		/* pointer to data area */
	devstat_t *dstat;	/* The device status in case of an error */
	/* these are important for recovering erroneous requests */
	struct ccw_req_t *refers;	/* Does this request refer to another one? */
	void *function;		/* refers to the originating ERP action */
	unsigned long long expires;	/* expiration period */
	/* these are for profiling purposes */
	unsigned long long buildclk;	/* TOD-clock of request generation */
	unsigned long long startclk;	/* TOD-clock of request start */
	unsigned long long stopclk;	/* TOD-clock of request interrupt */
	unsigned long long endclk;	/* TOD-clock of request termination */
	/* these are for internal use */
	int cplength;		/* length of the channel program in CCWs */
	int datasize;		/* amount of additional data in bytes */
	kmem_cache_t *cache;	/* the cache this data comes from */
} __attribute__ ((aligned(4))) ccw_req_t;
/*
* ccw_req_t -> status can be:
*/
#define CQR_STATUS_EMPTY 0x00 /* request is empty */
#define CQR_STATUS_FILLED 0x01 /* request is ready to be processed */
#define CQR_STATUS_QUEUED 0x02 /* request is queued to be processed */
#define CQR_STATUS_IN_IO 0x03 /* request is currently in IO */
#define CQR_STATUS_DONE 0x04 /* request is completed successfully */
#define CQR_STATUS_ERROR 0x05 /* request is completed with error */
#define CQR_STATUS_FAILED 0x06 /* request is finally failed */
#define CQR_STATUS_PENDING 0x07 /* request is waiting for interrupt - ERP only */
#define CQR_FLAGS_CHAINED 0x01 /* request is chained by another (last CCW is TIC) */
#ifdef __KERNEL__
/* smallest power-of-two slab size (in bytes) that holds one ccw_req_t */
#define SMALLEST_SLAB (sizeof(struct ccw_req_t) <= 128 ? 128 :\
sizeof(struct ccw_req_t) <= 256 ? 256 : 512 )
/* SMALLEST_SLAB(1),... PAGE_SIZE(CCW_NUMBER_CACHES) */
/* number of power-of-two caches from SMALLEST_SLAB up to 4096 bytes
   (128 -> 6 caches, 256 -> 5, 512 -> 4) */
#define CCW_NUMBER_CACHES (sizeof(struct ccw_req_t) <= 128 ? 6 :\
sizeof(struct ccw_req_t) <= 256 ? 5 : 4 )
int ccwcache_init (void);
ccw_req_t *ccw_alloc_request (char *magic, int cplength, int additional_data);
void ccw_free_request (ccw_req_t * request);
#endif /* __KERNEL__ */
#endif /* CCWCACHE_H */
......@@ -13,16 +13,14 @@
#ifdef __KERNEL__
#include <asm/thread_info.h>
struct task_struct;
static inline struct task_struct * get_current(void)
{
struct task_struct *current;
__asm__("lhi %0,-8192\n\t"
"al %0,0xc40"
: "=&r" (current) : : "cc" );
return current;
}
return current_thread_info()->task;
}
#define current get_current()
......
......@@ -10,6 +10,10 @@
*
* History of changes (starts July 2000)
* 05/04/01 created by moving the kernel interface to drivers/s390/block/dasd_int.h
* 12/06/01 DASD_API_VERSION 2 - binary compatible to 0 (new BIODASDINFO2)
* 01/23/02 DASD_API_VERSION 3 - added BIODASDPSRD (and BIODASDENAPAV) IOCTL
* 02/15/02 DASD_API_VERSION 4 - added BIODASDSATTR IOCTL
*
*/
#ifndef DASD_H
......@@ -18,10 +22,125 @@
#define DASD_IOCTL_LETTER 'D'
#if (DASD_API_VERSION == 0)
#define DASD_API_VERSION 4
/*
* struct dasd_information2_t
* represents any data about the device that is visible to userspace,
* including format and features.
*/
typedef struct dasd_information2_t {
unsigned int devno; /* S/390 devno */
unsigned int real_devno; /* for aliases */
unsigned int schid; /* S/390 subchannel identifier */
unsigned int cu_type : 16; /* from SenseID */
unsigned int cu_model : 8; /* from SenseID */
unsigned int dev_type : 16; /* from SenseID */
unsigned int dev_model : 8; /* from SenseID */
unsigned int open_count;
unsigned int req_queue_len;
unsigned int chanq_len; /* length of chanq */
char type[4]; /* from discipline.name, 'none' for unknown */
unsigned int status; /* current device level */
unsigned int label_block; /* where to find the VOLSER */
unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
unsigned int characteristics_size;
unsigned int confdata_size;
char characteristics[64]; /* from read_device_characteristics */
char configuration_data[256]; /* from read_configuration_data */
unsigned int format; /* format info, one of the DASD_FORMAT_* values below */
unsigned int features; /* dasd features, DASD_FEATURE_* bits below */
/* the reserved words keep the record extensible without a size change */
unsigned int reserved0; /* reserved for further use ,... */
unsigned int reserved1; /* reserved for further use ,... */
unsigned int reserved2; /* reserved for further use ,... */
unsigned int reserved3; /* reserved for further use ,... */
unsigned int reserved4; /* reserved for further use ,... */
unsigned int reserved5; /* reserved for further use ,... */
unsigned int reserved6; /* reserved for further use ,... */
unsigned int reserved7; /* reserved for further use ,... */
} dasd_information2_t;
/*
* values to be used for dasd_information_t.format
* 0x00: NOT formatted
* 0x01: Linux disc layout
* 0x02: Common disc layout
*/
#define DASD_FORMAT_NONE 0
#define DASD_FORMAT_LDL 1
#define DASD_FORMAT_CDL 2
/*
* values to be used for dasd_information_t.features
* 0x00: default features
* 0x01: readonly (ro)
*/
#define DASD_FEATURE_DEFAULT 0
#define DASD_FEATURE_READONLY 1
#define DASD_PARTN_BITS 2
/*
* struct dasd_information_t
* represents any data about the device that is visible to userspace
*/
/* original (API version 0) info record returned by BIODASDINFO; its
   fields are the leading fields of dasd_information2_t above */
typedef struct dasd_information_t {
unsigned int devno; /* S/390 devno */
unsigned int real_devno; /* for aliases */
unsigned int schid; /* S/390 subchannel identifier */
unsigned int cu_type : 16; /* from SenseID */
unsigned int cu_model : 8; /* from SenseID */
unsigned int dev_type : 16; /* from SenseID */
unsigned int dev_model : 8; /* from SenseID */
unsigned int open_count;
unsigned int req_queue_len;
unsigned int chanq_len; /* length of chanq */
char type[4]; /* from discipline.name, 'none' for unknown */
unsigned int status; /* current device level */
unsigned int label_block; /* where to find the VOLSER */
unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
unsigned int characteristics_size;
unsigned int confdata_size;
char characteristics[64]; /* from read_device_characteristics */
char configuration_data[256]; /* from read_configuration_data */
} dasd_information_t;
/*
* Read Subsystem Data - Performance Statistics
*/
/* NOTE(review): 'nr_bybass_cache' and 'reseved2' are misspelled, but this
   struct is returned to user space by the BIODASDPSRD ioctl below, so the
   names are ABI and must not be renamed. */
typedef struct dasd_rssd_perf_stats_t {
unsigned char invalid:1;
unsigned char format:3;
unsigned char data_format:4;
unsigned char unit_address;
unsigned short device_status;
unsigned int nr_read_normal;
unsigned int nr_read_normal_hits;
unsigned int nr_write_normal;
unsigned int nr_write_fast_normal_hits;
unsigned int nr_read_seq;
unsigned int nr_read_seq_hits;
unsigned int nr_write_seq;
unsigned int nr_write_fast_seq_hits;
unsigned int nr_read_cache;
unsigned int nr_read_cache_hits;
unsigned int nr_write_cache;
unsigned int nr_write_fast_cache_hits;
unsigned int nr_inhibit_cache;
unsigned int nr_bybass_cache;
unsigned int nr_seq_dasd_to_cache;
unsigned int nr_dasd_to_cache;
unsigned int nr_cache_to_dasd;
unsigned int nr_delayed_fast_write;
unsigned int nr_normal_fast_write;
unsigned int nr_seq_fast_write;
unsigned int nr_cache_miss;
unsigned char status2;
unsigned int nr_quick_write_promotes;
unsigned char reserved;
unsigned short ssid;
unsigned char reseved2[96];
} __attribute__((packed)) dasd_rssd_perf_stats_t;
/*
* struct profile_info_t
* holds the profinling information
......@@ -62,30 +181,36 @@ typedef struct format_data_t {
#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */
#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */
/*
* struct dasd_information_t
* represents any data about the data, which is visible to userspace
* struct attrib_data_t
* represents the operation (cache) bits for the device.
* Used in DE to influence caching of the DASD.
*/
typedef struct dasd_information_t {
unsigned int devno; /* S/390 devno */
unsigned int real_devno; /* for aliases */
unsigned int schid; /* S/390 subchannel identifier */
unsigned int cu_type : 16; /* from SenseID */
unsigned int cu_model : 8; /* from SenseID */
unsigned int dev_type : 16; /* from SenseID */
unsigned int dev_model : 8; /* from SenseID */
unsigned int open_count;
unsigned int req_queue_len;
unsigned int chanq_len;
char type[4]; /* from discipline.name, 'none' for unknown */
unsigned int status; /* current device level */
unsigned int label_block; /* where to find the VOLSER */
unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
unsigned int characteristics_size;
unsigned int confdata_size;
char characteristics[64]; /* from read_device_characteristics */
char configuration_data[256]; /* from read_configuration_data */
} dasd_information_t;
/* cache attributes used in the DE (define extent), set via BIODASDSATTR */
typedef struct attrib_data_t {
unsigned char operation:3; /* cache operation mode (DASD_* values below) */
unsigned char reserved:5; /* reserved bits, pad operation to a full byte */
__u16 nr_cyl; /* number of cylinders for read ahead */
__u8 reserved2[29]; /* for future use */
} __attribute__ ((packed)) attrib_data_t;
/* definition of operation (cache) bits within attributes of DE */
#define DASD_NORMAL_CACHE 0x0
#define DASD_BYPASS_CACHE 0x1
#define DASD_INHIBIT_LOAD 0x2
#define DASD_SEQ_ACCESS 0x3
#define DASD_SEQ_PRESTAGE 0x4
#define DASD_REC_ACCESS 0x5
/********************************************************************************
* SECTION: Definition of IOCTLs
*
* Here is how the ioctl numbers should be used:
* 0 - 31 DASD driver itself
* 32 - 239 still open
* 240 - 255 reserved for EMC
*******************************************************************************/
/* Disable the volume (for Linux) */
#define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0)
......@@ -97,15 +222,28 @@ typedef struct dasd_information_t {
#define BIODASDSLCK _IO(DASD_IOCTL_LETTER,4) /* steal lock */
/* reset profiling information of a device */
#define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5)
/* enable PAV */
#define BIODASDENAPAV _IO(DASD_IOCTL_LETTER,6)
/* retrieve API version number */
#define DASDAPIVER _IOR(DASD_IOCTL_LETTER,0,int)
/* Get information on a dasd device */
#define BIODASDINFO _IOR(DASD_IOCTL_LETTER,1,dasd_information_t)
/* retrieve profiling information of a device */
#define BIODASDPRRD _IOR(DASD_IOCTL_LETTER,2,dasd_profile_info_t)
/* Get information on a dasd device (enhanced) */
#define BIODASDINFO2 _IOR(DASD_IOCTL_LETTER,3,dasd_information2_t)
/* Performance Statistics Read */
#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t)
/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */
#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t)
#endif /* DASD_API_VERSION */
/* Set Attributes (cache operations) */
#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
#endif /* DASD_H */
/*
......
......@@ -54,7 +54,7 @@ struct __debug_entry{
#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
/* the entry information */
#define STCK(x) asm volatile ("STCK %0" : "=m" (x) : : "cc" )
#define STCK(x) asm volatile ("STCK 0(%1)" : "=m" (x) : "a" (&(x)) : "cc")
typedef struct __debug_entry debug_entry_t;
......
......@@ -24,7 +24,7 @@ extern __u8 _ebc_toupper[]; /* EBCDIC -> uppercase */
extern __inline__
void codepage_convert(const __u8 *codepage, volatile __u8 * addr, int nr)
{
if (nr <= 0)
if (nr-- <= 0)
return;
__asm__ __volatile__(
" bras 1,1f\n"
......@@ -34,7 +34,7 @@ void codepage_convert(const __u8 *codepage, volatile __u8 * addr, int nr)
"1: ahi %1,-256\n"
" jp 0b\n"
" ex %1,0(1)"
: "+&a" (addr), "+&a" (nr-1)
: "+&a" (addr), "+&a" (nr)
: "a" (codepage) : "cc", "memory", "1" );
}
......
/*
* header file for FCP adapter driver for IBM eServer zSeries
*
* Authors:
* Martin Peschke <mpeschke@de.ibm.com>
* Raimund Schroeder <raimund.schroeder@de.ibm.com>
* Aron Zeh <arzeh@de.ibm.com>
* Wolfgang Taphorn <taphorn@de.ibm.com>
*
* Copyright (C) 2002 IBM Entwicklung GmbH, IBM Corporation
*/
#ifndef FSF_H
#define FSF_H
#define FSF_QTCB_VERSION1 0x00000001
#define FSF_QTCB_CURRENT_VERSION FSF_QTCB_VERSION1
/* FSF commands */
#define FSF_QTCB_FCP_CMND 0x00000001
#define FSF_QTCB_ABORT_FCP_CMND 0x00000002
#define FSF_QTCB_OPEN_PORT_WITH_DID 0x00000005
#define FSF_QTCB_OPEN_LUN 0x00000006
#define FSF_QTCB_CLOSE_LUN 0x00000007
#define FSF_QTCB_CLOSE_PORT 0x00000008
#define FSF_QTCB_CLOSE_PHYSICAL_PORT 0x00000009
#define FSF_QTCB_SEND_ELS 0x0000000B
#define FSF_QTCB_SEND_GENERIC 0x0000000C
#define FSF_QTCB_EXCHANGE_CONFIG_DATA 0x0000000D
/* FSF QTCB types */
#define FSF_IO_COMMAND 0x00000001
#define FSF_SUPPORT_COMMAND 0x00000002
#define FSF_CONFIG_COMMAND 0x00000003
/* association between FSF command and FSF QTCB type: the array is indexed
 * by the FSF_QTCB_* command code and yields the matching FSF_*_COMMAND
 * QTCB type.
 * NOTE(review): this is a non-static array *definition* in a header file;
 * including fsf.h from more than one translation unit will cause duplicate
 * symbol errors at link time - confirm single inclusion, or move the
 * definition into a .c file. */
u32 fsf_qtcb_type[] = {
[ FSF_QTCB_FCP_CMND ] = FSF_IO_COMMAND,
[ FSF_QTCB_ABORT_FCP_CMND ] = FSF_SUPPORT_COMMAND,
[ FSF_QTCB_OPEN_PORT_WITH_DID ] = FSF_SUPPORT_COMMAND,
[ FSF_QTCB_OPEN_LUN ] = FSF_SUPPORT_COMMAND,
[ FSF_QTCB_CLOSE_LUN ] = FSF_SUPPORT_COMMAND,
[ FSF_QTCB_CLOSE_PORT ] = FSF_SUPPORT_COMMAND,
[ FSF_QTCB_CLOSE_PHYSICAL_PORT ] = FSF_SUPPORT_COMMAND,
[ FSF_QTCB_SEND_ELS ] = FSF_SUPPORT_COMMAND,
[ FSF_QTCB_SEND_GENERIC ] = FSF_SUPPORT_COMMAND,
[ FSF_QTCB_EXCHANGE_CONFIG_DATA ] = FSF_CONFIG_COMMAND
};
/* FSF protocol stati */
#define FSF_PROT_GOOD 0x00000001
#define FSF_PROT_QTCB_VERSION_ERROR 0x00000010
#define FSF_PROT_SEQ_NUMB_ERROR 0x00000020
#define FSF_PROT_UNSUPP_QTCB_TYPE 0x00000040
#define FSF_PROT_HOST_CONNECTION_INITIALIZING 0x00000080
#define FSF_PROT_FSF_STATUS_PRESENTED 0x00000100
#define FSF_PROT_DUPLICATE_REQUEST_ID 0x00000200
#define FSF_PROT_LINK_DOWN 0x00000400
#define FSF_PROT_REEST_QUEUE 0x00000800
#define FSF_PROT_ERROR_STATE 0x01000000
/* FSF stati */
#define FSF_GOOD 0x00000000
#define FSF_PORT_ALREADY_OPEN 0x00000001
#define FSF_LUN_ALREADY_OPEN 0x00000002
#define FSF_PORT_HANDLE_NOT_VALID 0x00000003
#define FSF_LUN_HANDLE_NOT_VALID 0x00000004
#define FSF_HANDLE_MISMATCH 0x00000005
#define FSF_SERVICE_CLASS_NOT_SUPPORTED 0x00000006
#define FSF_FCPLUN_NOT_VALID 0x00000009
//#define FSF_ACCESS_DENIED 0x00000010
#define FSF_ACCESS_TYPE_NOT_VALID 0x00000011
#define FSF_LUN_IN_USE 0x00000012
#define FSF_COMMAND_ABORTED_ULP 0x00000020
#define FSF_COMMAND_ABORTED_ADAPTER 0x00000021
#define FSF_FCP_COMMAND_DOES_NOT_EXIST 0x00000022
#define FSF_DIRECTION_INDICATOR_NOT_VALID 0x00000030
#define FSF_INBOUND_DATA_LENGTH_NOT_VALID 0x00000031 /* FIXME: obsolete? */
#define FSF_OUTBOUND_DATA_LENGTH_NOT_VALID 0x00000032 /* FIXME: obsolete? */
#define FSF_CMND_LENGTH_NOT_VALID 0x00000033
#define FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED 0x00000040
#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
#define FSF_REQUEST_BUF_NOT_VALID 0x00000042
#define FSF_RESPONSE_BUF_NOT_VALID 0x00000043
#define FSF_ELS_COMMAND_REJECTED 0x00000050
#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
//#define FSF_AUTHORIZATION_FAILURE 0x00000053
#define FSF_PORT_BOXED 0x00000059
//#define FSF_LUN_BOXED 0x0000005A
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
//#define FSF_ERROR 0x000000FF
/* FSF status qualifier, recommendations */
#define FSF_SQ_NO_RECOM 0x00
#define FSF_SQ_FCP_RSP_AVAILABLE 0x01
#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
#define FSF_SQ_COMMAND_ABORTED 0x06
#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
/* FSF status qualifier (most significant 4 bytes), local link down */
#define FSF_PSQ_LINK_NOLIGHT 0x00000004
#define FSF_PSQ_LINK_WRAPPLUG 0x00000008
#define FSF_PSQ_LINK_NOFCP 0x00000010
/* payload size in status read buffer */
#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
/* status types in status read buffer */
#define FSF_STATUS_READ_PORT_CLOSED 0x00000001
#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */
#define FSF_STATUS_READ_LINK_UP 0x00000006
/* status subtypes in status read buffer */
#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
/* topology that is detected by the adapter */
#define FSF_TOPO_ERROR 0x00000000
#define FSF_TOPO_P2P 0x00000001
#define FSF_TOPO_FABRIC 0x00000002
#define FSF_TOPO_AL 0x00000003
#define FSF_TOPO_FABRIC_VIRT 0x00000004
/* data direction for FCP commands */
#define FSF_DATADIR_WRITE 0x00000001
#define FSF_DATADIR_READ 0x00000002
#define FSF_DATADIR_READ_WRITE 0x00000003
#define FSF_DATADIR_CMND 0x00000004
/* fc service class */
#define FSF_CLASS_1 0x00000001
#define FSF_CLASS_2 0x00000002
#define FSF_CLASS_3 0x00000003
struct fsf_queue_designator;
struct fsf_status_read_buffer;
struct fsf_port_closed_payload;
struct fsf_bit_error_payload;
union fsf_prot_status_qual;
struct fsf_qual_version_error;
struct fsf_qual_sequence_error;
struct fsf_qtcb_prefix;
struct fsf_qtcb_header;
struct fsf_qtcb_bottom_config;
struct fsf_qtcb_bottom_support;
struct fsf_qtcb_bottom_io;
union fsf_qtcb_bottom;
/* identifies a queue by channel-subsystem id, chpid and address */
typedef struct fsf_queue_designator {
u8 cssid;
u8 chpid;
u8 hla;
u8 ua;
u32 res1;
} __attribute__ ((packed)) fsf_queue_designator_t;
/* payload of a status read buffer of type FSF_STATUS_READ_PORT_CLOSED */
typedef struct fsf_port_closed_payload {
fsf_queue_designator_t queue_designator;
u32 port_handle;
} __attribute__ ((packed)) fsf_port_closed_payload_t;
/* link bit-error counters, delivered with
   FSF_STATUS_READ_BIT_ERROR_THRESHOLD (see status types above) */
typedef struct fsf_bit_error_payload {
u32 res1;
u32 link_failure_error_count;
u32 loss_of_sync_error_count;
u32 loss_of_signal_error_count;
u32 primitive_sequence_error_count;
u32 invalid_transmission_word_error_count;
u32 crc_error_count;
u32 primitive_sequence_event_timeout_count;
u32 elastic_buffer_overrun_error_count;
u32 fcal_arbitration_timeout_count;
u32 advertised_receive_b2b_credit;
u32 current_receive_b2b_credit;
u32 advertised_transmit_b2b_credit;
u32 current_transmit_b2b_credit;
} __attribute__ ((packed)) fsf_bit_error_payload_t;
/* unsolicited status from the adapter; status_type is one of the
   FSF_STATUS_READ_* values, status_subtype one of
   FSF_STATUS_READ_SUB_*, and payload holds up to
   FSF_STATUS_READ_PAYLOAD_SIZE bytes of type-dependent data */
typedef struct fsf_status_read_buffer {
u32 status_type;
u32 status_subtype;
u32 length;
u32 res1;
fsf_queue_designator_t queue_designator;
u32 d_id;
u32 class;
u64 fcp_lun;
u8 res3[24];
u8 payload[FSF_STATUS_READ_PAYLOAD_SIZE];
} __attribute__ ((packed)) fsf_status_read_buffer_t;
/* qualifier delivered with prot_status FSF_PROT_QTCB_VERSION_ERROR */
typedef struct fsf_qual_version_error {
u32 fsf_version;
u32 res1[3];
} __attribute__ ((packed)) fsf_qual_version_error_t;
/* qualifier delivered with prot_status FSF_PROT_SEQ_NUMB_ERROR */
typedef struct fsf_qual_sequence_error {
u32 exp_req_seq_no;
u32 res1[3];
} __attribute__ ((packed)) fsf_qual_sequence_error_t;
/* qualifier carrying a local-link error code
   (presumably the FSF_PSQ_LINK_* values above - confirm) */
typedef struct fsf_qual_locallink_error {
u32 code;
u32 res1[3];
} __attribute__ ((packed)) fsf_qual_locallink_error_t;
/* protocol status qualifier; which member applies depends on the
   prot_status value in the QTCB prefix below */
typedef union fsf_prot_status_qual {
fsf_qual_version_error_t version_error;
fsf_qual_sequence_error_t sequence_error;
fsf_qual_locallink_error_t locallink_error;
} __attribute__ ((packed)) fsf_prot_status_qual_t;
/* request identification and protocol-level status of a QTCB */
typedef struct fsf_qtcb_prefix {
u64 req_id;
u32 qtcb_version; /* FSF_QTCB_VERSION1 / FSF_QTCB_CURRENT_VERSION */
u32 ulp_info;
u32 qtcb_type; /* FSF_IO/SUPPORT/CONFIG_COMMAND, see fsf_qtcb_type[] */
u32 req_seq_no;
u32 prot_status; /* one of the FSF_PROT_* values */
fsf_prot_status_qual_t prot_status_qual;
u8 res1[20];
} __attribute__ ((packed)) fsf_qtcb_prefix_t;
/* command-level part of a QTCB: handles, FSF command code and status */
typedef struct fsf_qtcb_header {
u64 req_handle;
u32 fsf_command; /* one of the FSF_QTCB_* command codes */
u32 res1;
u32 port_handle;
u32 lun_handle;
u32 res2;
u32 fsf_status; /* one of the FSF_* status values */
u32 fsf_status_qual[4];
// fsf_status_qual_t fsf_status_qual; FIXME: define union
u8 res3[28];
u16 log_start;
u16 log_length;
u8 res4[16];
} __attribute__ ((packed)) fsf_qtcb_header_t;
/* world-wide name (64 bit) */
typedef u64 fsf_wwn_t;
/* N_Port service parameters as carried in a PLOGI payload */
typedef struct fsf_nport_serv_param {
u8 common_serv_param[16];
fsf_wwn_t wwpn;
fsf_wwn_t wwnn;
u8 class1_serv_param[16];
u8 class2_serv_param[16];
u8 class3_serv_param[16];
u8 class4_serv_param[16];
u8 vendor_version_level[16];
u8 res1[16];
} __attribute__ ((packed)) fsf_nport_serv_param_t;
typedef struct fsf_plogi {
u32 code;
fsf_nport_serv_param_t serv_param;
} __attribute__ ((packed)) fsf_plogi_t;
/* fixed sizes of the embedded FCP command and response areas */
#define FSF_FCP_CMND_SIZE 288
#define FSF_FCP_RSP_SIZE 128
/* QTCB bottom for FSF_IO_COMMAND requests (FCP command + response) */
typedef struct fsf_qtcb_bottom_io {
u32 data_direction; /* one of the FSF_DATADIR_* values */
u32 service_class; /* one of the FSF_CLASS_* values */
u8 res1[8];
u32 fcp_cmnd_length;
u8 res2[12];
u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
u8 fcp_rsp[FSF_FCP_RSP_SIZE];
u8 res3[64];
} __attribute__ ((packed)) fsf_qtcb_bottom_io_t;
/* QTCB bottom for FSF_SUPPORT_COMMAND requests (open/close, ELS, ...) */
typedef struct fsf_qtcb_bottom_support {
u8 res1[16];
u32 d_id;
u32 res2;
u64 fcp_lun;
u64 res3;
u64 req_handle;
u32 service_class;
u8 res4[3];
u8 timeout;
u8 res5[184];
u32 els1_length;
u32 els2_length;
u64 res6;
u8 els[256];
} __attribute__ ((packed)) fsf_qtcb_bottom_support_t;
/* QTCB bottom for FSF_CONFIG_COMMAND (exchange config data) replies */
typedef struct fsf_qtcb_bottom_config {
u32 lic_version;
u32 res1;
u32 high_qtcb_version;
u32 low_qtcb_version;
u32 max_qtcb_size;
u8 res2[12];
u32 fc_topology; /* one of the FSF_TOPO_* values */
u32 fc_link_speed;
u32 adapter_type;
u32 peer_d_id;
u8 res3[12];
u32 s_id;
fsf_nport_serv_param_t nport_serv_param;
u8 res4[320];
} __attribute__ ((packed)) fsf_qtcb_bottom_config_t;
/* variant part of a QTCB; selected by the qtcb_type in the prefix */
typedef union fsf_qtcb_bottom {
fsf_qtcb_bottom_io_t io;
fsf_qtcb_bottom_support_t support;
fsf_qtcb_bottom_config_t config;
} fsf_qtcb_bottom_t;
/* complete queue transfer control block: prefix + header + bottom */
typedef struct fsf_qtcb {
fsf_qtcb_prefix_t prefix;
fsf_qtcb_header_t header;
fsf_qtcb_bottom_t bottom;
} __attribute__ ((packed)) fsf_qtcb_t;
#endif /* FSF_H */
......@@ -16,6 +16,7 @@
#include <linux/threads.h>
#include <asm/lowcore.h>
#include <linux/sched.h>
#include <linux/cache.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
......
/*
* File...........: linux/include/asm-s390x/idals.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
* History of changes
* 07/24/00 new file
* 05/04/02 code restructuring.
*/
#ifndef _S390_IDALS_H
#define _S390_IDALS_H
#include <linux/config.h>
#include <linux/errno.h>
#include <asm/irq.h>
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#define IDA_SIZE_LOG 11 /* 11 for 2k , 12 for 4k */
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
static inline addr_t *
idal_alloc ( int nridaws )
/*
* Test if an address/length pair needs an idal list.
*/
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
if ( nridaws > 33 )
BUG();
return kmalloc(nridaws * sizeof(addr_t), GFP_ATOMIC | GFP_DMA );
#if defined(CONFIG_ARCH_S390X)
return ((__pa(vaddr) + length) >> 31) != 0;
#else
return 0;
#endif
}
static inline void
idal_free ( addr_t *idal )
/*
* Return the number of idal words needed for an address/length pair.
*/
static inline unsigned int
idal_nr_words(void *vaddr, unsigned int length)
{
kfree (idal);
#if defined(CONFIG_ARCH_S390X)
if (idal_is_needed(vaddr, length))
return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
#endif
return 0;
}
/*
* Create the list of idal words for an address/length pair.
*/
/*
 * Fill in the list of idal words for an address/length pair and return
 * a pointer just past the last word written.
 *
 * The first idal word is the physical address of vaddr (keeping its
 * byte offset within the IDA block); every following word is the
 * block-aligned physical address of the next IDA_BLOCK_SIZE chunk.
 * On non-S390X kernels no idal is ever needed (idal_nr_words() is 0),
 * so the buffer is left untouched and idaws is returned unchanged.
 *
 * Fix vs. previous revision: dropped the unused function-local
 * 'extern unsigned long __create_idal(...)' declaration, a leftover
 * from the removed out-of-line implementation.
 */
static inline unsigned long *
idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length)
{
#if defined(CONFIG_ARCH_S390X)
	unsigned long paddr;
	unsigned int cidaw;

	paddr = __pa(vaddr);
	/* number of idal words covering [paddr, paddr + length) */
	cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
		 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
	*idaws++ = paddr;
	paddr &= -IDA_BLOCK_SIZE;
	while (--cidaw > 0) {
		paddr += IDA_BLOCK_SIZE;
		*idaws++ = paddr;
	}
#endif
	return idaws;
}
/*
* Function: set_normalized_cda
* sets the address of the data in CCW
* if necessary it allocates an IDAL and sets sthe appropriate flags
* Sets the address of the data in CCW.
* If necessary it allocates an IDAL and sets the appropriate flags.
*/
static inline int
set_normalized_cda(ccw1_t * ccw, unsigned long address)
set_normalized_cda(ccw1_t * ccw, void *vaddr)
{
int ret = 0;
#if defined (CONFIG_ARCH_S390X)
if (((address + ccw->count) >> 31) != 0) {
unsigned int nridaws;
unsigned long *idal;
if (ccw->flags & CCW_FLAG_IDA)
BUG();
address = __create_idal(address, ccw->count);
if (address)
return -EINVAL;
nridaws = idal_nr_words(vaddr, ccw->count);
if (nridaws > 0) {
idal = kmalloc(nridaws * sizeof(unsigned long),
GFP_ATOMIC | GFP_DMA );
if (idal == NULL)
return -ENOMEM;
idal_create_words(idal, vaddr, ccw->count);
ccw->flags |= CCW_FLAG_IDA;
else
ret = -ENOMEM;
vaddr = idal;
}
#endif
ccw->cda = (__u32) address;
return ret;
ccw->cda = (__u32)(unsigned long) vaddr;
return 0;
}
/*
* Function: clear_normalized_cda
* releases any allocated IDAL related to the CCW
* Releases any allocated IDAL related to the CCW.
*/
static inline void
clear_normalized_cda ( ccw1_t * ccw )
clear_normalized_cda(ccw1_t * ccw)
{
#if defined(CONFIG_ARCH_S390X)
if ( ccw -> flags & CCW_FLAG_IDA ) {
idal_free ( (addr_t *)(unsigned long) (ccw -> cda ));
ccw -> flags &= ~CCW_FLAG_IDA;
if (ccw->flags & CCW_FLAG_IDA) {
kfree((void *)(unsigned long) ccw->cda);
ccw->flags &= ~CCW_FLAG_IDA;
}
#endif
ccw -> cda = 0;
ccw->cda = 0;
}
#endif
......@@ -4,26 +4,4 @@
* S390 version
*/
#ifndef _S390_INIT_H
#define _S390_INIT_H
#define __init __attribute__ ((constructor))
/* don't know, if need on S390 */
#define __initdata
#define __initfunc(__arginit) \
__arginit __init; \
__arginit
/* For assembly routines
* need to define ?
*/
/*
#define __INIT .section ".text.init",#alloc,#execinstr
#define __FINIT .previous
#define __INITDATA .section ".data.init",#alloc,#write
*/
#define __cacheline_aligned __attribute__ ((__aligned__(256)))
#endif
#error "<asm/init.h> should never be used - use <linux/init.h> instead"
......@@ -40,6 +40,11 @@ extern inline void * phys_to_virt(unsigned long address)
return __io_virt(address);
}
/*
* Change "struct page" to physical address.
*/
#define page_to_phys(page) ((page - mem_map) << PAGE_SHIFT)
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
extern inline void * ioremap (unsigned long offset, unsigned long size)
......
......@@ -10,14 +10,12 @@
*/
#define __MAX_SUBCHANNELS 65536
#define NR_IRQS __MAX_SUBCHANNELS
#define NR_CHPIDS 256
#define LPM_ANYPATH 0xff /* doesn't really belong here, Ingo? */
#define INVALID_STORAGE_AREA ((void *)(-1 - 0x3FFF ))
extern int disable_irq(unsigned int);
extern int enable_irq(unsigned int);
/*
* path management control word
*/
......@@ -362,6 +360,92 @@ typedef struct {
/* extended part */
ciw_t ciw[MAX_CIWS]; /* variable # of CIWs */
} __attribute__ ((packed,aligned(4))) senseid_t;
/*
* where we put the ssd info
* (per-subchannel copy of the subchannel-description data that chsc
* delivers in chsc_area_t.response_block_data.ssd_res below)
*/
typedef struct _ssd_info {
__u8 valid:1;
__u8 type:7; /* subchannel type */
__u8 chpid[8]; /* chpids */
__u16 fla[8]; /* full link addresses */
} __attribute__ ((packed)) ssd_info_t;
/*
* area for store event information (sei) / store subchannel description
* (ssd): request/response block handed to the chsc instruction (see
* chsc() below); page-aligned via the aligned(PAGE_SIZE) attribute.
*/
typedef struct chsc_area_t {
/* filled in before issuing chsc; the union member used must match
   the command code */
struct {
/* word 0 */
__u16 command_code1;
__u16 command_code2;
union {
struct {
/* word 1 */
__u32 reserved1;
/* word 2 */
__u32 reserved2;
} __attribute__ ((packed,aligned(8))) sei_req;
struct {
/* word 1 */
__u16 reserved1;
__u16 f_sch; /* first subchannel */
/* word 2 */
__u16 reserved2;
__u16 l_sch; /* last subchannel */
} __attribute__ ((packed,aligned(8))) ssd_req;
} request_block_data;
/* word 3 */
__u32 reserved3;
} __attribute__ ((packed,aligned(8))) request_block;
/* written by the machine; response_code reports success/failure */
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
union {
struct {
/* word 2 */
__u8 flags;
__u8 vf; /* validity flags */
__u8 rs; /* reporting source */
__u8 cc; /* content code */
/* word 3 */
__u16 fla; /* full link address */
__u16 rsid; /* reporting source id */
/* word 4 */
__u32 reserved2;
/* word 5 */
__u32 reserved3;
/* word 6 */
__u32 ccdf; /* content-code dependent field */
/* word 7 */
__u32 reserved4;
/* word 8 */
__u32 reserved5;
/* word 9 */
__u32 reserved6;
} __attribute__ ((packed,aligned(8))) sei_res;
struct {
/* word 2 */
__u8 sch_valid : 1;
__u8 dev_valid : 1;
__u8 st : 3; /* subchannel type */
__u8 zeroes : 3;
__u8 unit_addr; /* unit address */
__u16 devno; /* device number */
/* word 3 */
__u8 path_mask;
__u8 fla_valid_mask;
__u16 sch; /* subchannel */
/* words 4-5 */
__u8 chpid[8]; /* chpids 0-7 */
/* words 6-9 */
__u16 fla[8]; /* full link addresses 0-7 */
} __attribute__ ((packed,aligned(8))) ssd_res;
} response_block_data;
} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE))) chsc_area_t;
#endif /* __KERNEL__ */
/*
......@@ -491,6 +575,7 @@ typedef struct {
/* ... for suspended CCWs */
#define DOIO_TIMEOUT 0x0080 /* 3 secs. timeout for sync. I/O */
#define DOIO_DONT_CALL_INTHDLR 0x0100 /* don't call interrupt handler */
#define DOIO_CANCEL_ON_TIMEOUT 0x0200 /* cancel I/O if it timed out */
/*
* do_IO()
......@@ -513,11 +598,6 @@ int do_IO( int irq, /* IRQ aka. subchannel number */
__u8 lpm, /* logical path mask */
unsigned long flag); /* flags : see above */
int start_IO( int irq, /* IRQ aka. subchannel number */
ccw1_t *cpa, /* logical channel program address */
unsigned long intparm, /* interruption parameter */
__u8 lpm, /* logical path mask */
unsigned int flag); /* flags : see above */
void do_crw_pending( void ); /* CRW handler */
......@@ -531,14 +611,6 @@ int clear_IO( int irq, /* IRQ aka. subchannel number */
unsigned long intparm, /* dummy intparm */
unsigned long flag); /* possible DOIO_WAIT_FOR_INTERRUPT */
int process_IRQ( struct pt_regs regs,
unsigned int irq,
unsigned int intparm);
int enable_cpu_sync_isc ( int irq );
int disable_cpu_sync_isc( int irq );
typedef struct {
int irq; /* irq, aka. subchannel */
__u16 devno; /* device number */
......@@ -546,8 +618,6 @@ typedef struct {
senseid_t sid_data; /* senseID data */
} s390_dev_info_t;
int get_dev_info( int irq, s390_dev_info_t *); /* to be eliminated - don't use */
int get_dev_info_by_irq ( int irq, s390_dev_info_t *pdi);
int get_dev_info_by_devno( __u16 devno, s390_dev_info_t *pdi);
......@@ -560,8 +630,6 @@ int get_irq_next ( int irq );
int read_dev_chars( int irq, void **buffer, int length );
int read_conf_data( int irq, void **buffer, int *length, __u8 lpm );
int s390_DevicePathVerification( int irq, __u8 domask );
int s390_request_irq_special( int irq,
io_handler_func_t io_handler,
not_oper_handler_func_t not_oper_handler,
......@@ -570,7 +638,6 @@ int s390_request_irq_special( int irq,
void *dev_id);
extern int set_cons_dev(int irq);
extern int reset_cons_dev(int irq);
extern int wait_cons_dev(int irq);
extern schib_t *s390_get_schib( int irq );
......@@ -630,11 +697,6 @@ extern __inline__ int msch_err(int irq, volatile schib_t *addr)
" .align 8\n"
" .quad 0b,2b\n"
".previous"
" lr 1,%1\n"
" msch 0(%2)\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
#else
".section .fixup,\"ax\"\n"
"2: l %0,%3\n"
......@@ -743,6 +805,21 @@ extern __inline__ int hsch(int irq)
return ccode;
}
extern __inline__ int xsch(int irq)
{
int ccode;
__asm__ __volatile__(
" lr 1,%1\n"
" .insn rre,0xb2760000,%1,0\n"
" ipm %0\n"
" srl %0,28"
: "=d" (ccode)
: "d" (irq | 0x10000L)
: "cc", "1" );
return ccode;
}
extern __inline__ int iac( void)
{
int ccode;
......@@ -805,6 +882,20 @@ extern __inline__ int diag210( diag210_t * addr)
: "cc" );
return ccode;
}
extern __inline__ int chsc( chsc_area_t * chsc_area)
{
int cc;
__asm__ __volatile__ (
".insn rre,0xb25f0000,%1,0 \n\t"
"ipm %0 \n\t"
"srl %0,28 \n\t"
: "=d" (cc)
: "d" (chsc_area)
: "cc" );
return cc;
}
/*
* Various low-level irq details needed by irq.c, process.c,
......@@ -813,13 +904,6 @@ extern __inline__ int diag210( diag210_t * addr)
* Interrupt entry/exit code at both C and assembly level
*/
void mask_irq(unsigned int irq);
void unmask_irq(unsigned int irq);
#define MAX_IRQ_SOURCES 128
extern spinlock_t irq_controller_lock;
#ifdef CONFIG_SMP
#include <asm/atomic.h>
......@@ -849,17 +933,10 @@ static inline void irq_exit(int cpu, unsigned int irq)
#define __STR(x) #x
#define STR(x) __STR(x)
#ifdef CONFIG_SMP
/*
* SMP has a few special interrupts for IPI messages
*/
#endif /* CONFIG_SMP */
/*
* x86 profiling function, SMP safe. We might want to do this in
* assembly totally?
* is this ever used anyway?
*/
extern char _stext;
static inline void s390_do_profile (unsigned long addr)
......@@ -883,16 +960,19 @@ static inline void s390_do_profile (unsigned long addr)
#include <asm/s390io.h>
#define get_irq_lock(irq) &ioinfo[irq]->irq_lock
#define s390irq_spin_lock(irq) \
spin_lock(&(ioinfo[irq]->irq_lock))
spin_lock(get_irq_lock(irq))
#define s390irq_spin_unlock(irq) \
spin_unlock(&(ioinfo[irq]->irq_lock))
spin_unlock(get_irq_lock(irq))
#define s390irq_spin_lock_irqsave(irq,flags) \
spin_lock_irqsave(&(ioinfo[irq]->irq_lock), flags)
spin_lock_irqsave(get_irq_lock(irq), flags)
#define s390irq_spin_unlock_irqrestore(irq,flags) \
spin_unlock_irqrestore(&(ioinfo[irq]->irq_lock), flags)
spin_unlock_irqrestore(get_irq_lock(irq), flags)
#define touch_nmi_watchdog() do { } while(0)
......
......@@ -45,6 +45,9 @@
#define __LC_CPUID 0xC60
#define __LC_CPUADDR 0xC68
#define __LC_IPLDEV 0xC7C
#define __LC_JIFFY_TIMER 0xC80
#define __LC_PANIC_MAGIC 0xE00
#define __LC_PFAULT_INTPARM 0x080
......@@ -161,7 +164,7 @@ struct _lowcore
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
__u64 jiffy_timer_cc; /* 0xc80 */
__u64 jiffy_timer; /* 0xc80 */
atomic_t ext_call_fast; /* 0xc88 */
__u8 pad11[0xe00-0xc8c]; /* 0xc8c */
......@@ -182,12 +185,12 @@ extern __inline__ void set_prefix(__u32 address)
extern struct _lowcore *lowcore_ptr[];
#ifndef CONFIG_SMP
#define get_cpu_lowcore(cpu) S390_lowcore
#define safe_get_cpu_lowcore(cpu) S390_lowcore
#define get_cpu_lowcore(cpu) (&S390_lowcore)
#define safe_get_cpu_lowcore(cpu) (&S390_lowcore)
#else
#define get_cpu_lowcore(cpu) (*lowcore_ptr[cpu])
#define get_cpu_lowcore(cpu) (lowcore_ptr[(cpu)])
#define safe_get_cpu_lowcore(cpu) \
((cpu)==smp_processor_id() ? S390_lowcore:(*lowcore_ptr[(cpu)]))
((cpu) == smp_processor_id() ? &S390_lowcore : lowcore_ptr[(cpu)])
#endif
#endif /* __ASSEMBLY__ */
......
......@@ -12,6 +12,7 @@
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define MAP_SHARED 0x01 /* Share changes */
......
......@@ -59,8 +59,8 @@ static inline void copy_page(void *to, void *from)
: "memory" );
}
#define clear_user_page(page, vaddr) clear_page(page)
#define copy_user_page(to, from, vaddr) copy_page(to, from)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
......@@ -116,9 +116,13 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define __PAGE_OFFSET 0x0UL
#define PAGE_OFFSET 0x0UL
#define __pa(x) (unsigned long)(x)
#define __va(x) (void *)(x)
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define __va(x) (void *)(unsigned long)(x)
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
......@@ -4,6 +4,7 @@
/* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code
* includes it even if CONFIG_PCI is not set.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
#endif /* __ASM_S390_PCI_H */
#ifndef __ARCH_S390_PERCPU__
#define __ARCH_S390_PERCPU__
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390_PERCPU__ */
......@@ -17,10 +17,7 @@
#include <asm/processor.h>
#include <linux/threads.h>
#define pgd_quicklist (S390_lowcore.cpu_data.pgd_quick)
#define pmd_quicklist ((unsigned long *)0)
#define pte_quicklist (S390_lowcore.cpu_data.pte_quick)
#define pgtable_cache_size (S390_lowcore.cpu_data.pgtable_cache_sz)
#define check_pgt_cache() do {} while (0)
/*
* Allocate and free page tables. The xxx_kernel() versions are
......@@ -28,67 +25,35 @@
* if any.
*/
extern __inline__ pgd_t* get_pgd_slow(void)
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *ret;
pgd_t *pgd;
int i;
ret = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
if (ret != NULL)
pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,1);
if (pgd != NULL)
for (i = 0; i < USER_PTRS_PER_PGD; i++)
pmd_clear(pmd_offset(ret + i, i*PGDIR_SIZE));
return ret;
}
extern __inline__ pgd_t* get_pgd_fast(void)
{
unsigned long *ret = pgd_quicklist;
if (ret != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size -= 2;
}
return (pgd_t *)ret;
}
extern __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
{
pgd_t *pgd;
pgd = get_pgd_fast();
if (!pgd)
pgd = get_pgd_slow();
pmd_clear(pmd_offset(pgd + i, i*PGDIR_SIZE));
return pgd;
}
extern __inline__ void free_pgd_fast(pgd_t *pgd)
{
*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size += 2;
}
extern __inline__ void free_pgd_slow(pgd_t *pgd)
static inline void pgd_free(pgd_t *pgd)
{
free_pages((unsigned long) pgd, 1);
}
#define pgd_free(pgd) free_pgd_fast(pgd)
/*
* page middle directory allocation/free routines.
* We don't use pmd cache, so these are dummy routines. This
* code never triggers because the pgd will always be present.
*/
#define pmd_alloc_one_fast(mm, address) ({ BUG(); ((pmd_t *)1); })
#define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define pmd_free_slow(x) do { } while (0)
#define pmd_free_fast(x) do { } while (0)
#define pmd_free_tlb(tlb,x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
pmd_val(pmd[0]) = _PAGE_TABLE + __pa(pte);
pmd_val(pmd[1]) = _PAGE_TABLE + __pa(pte+256);
......@@ -96,50 +61,53 @@ extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
pmd_val(pmd[3]) = _PAGE_TABLE + __pa(pte+768);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
}
/*
* page table entry allocation/free routines.
*/
extern inline pte_t * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
pte_t *pte;
int count;
int i;
count = 0;
do {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte != NULL) {
for (i=0; i < PTRS_PER_PTE; i++)
pte_clear(pte+i);
} else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte;
}
extern __inline__ pte_t *
pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
unsigned long *ret = (unsigned long *) pte_quicklist;
if (ret != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
}
return (pte_t *)ret;
return virt_to_page(pte_alloc_one_kernel(mm, vmaddr));
}
extern __inline__ void pte_free_fast(pte_t *pte)
static inline void pte_free_kernel(pte_t *pte)
{
*(unsigned long *)pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
free_page((unsigned long) pte);
}
extern __inline__ void pte_free_slow(pte_t *pte)
static inline void pte_free(struct page *pte)
{
free_page((unsigned long) pte);
__free_page(pte);
}
#define pte_free(pte) pte_free_fast(pte)
extern int do_check_pgt_cache(int, int);
#define pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
/*
* This establishes kernel virtual mappings (e.g., as a result of a
......@@ -148,151 +116,6 @@ extern int do_check_pgt_cache(int, int);
*/
#define set_pgdir(addr,entry) do { } while(0)
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* called only from vmalloc/vfree
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
/*
* S/390 has three ways of flushing TLBs
* 'ptlb' does a flush of the local processor
* 'csp' flushes the TLBs on all PUs of a SMP
* 'ipte' invalidates a pte in a page table and flushes that out of
* the TLBs of all PUs of a SMP
*/
#define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
#ifndef CONFIG_SMP
/*
* We always need to flush, since s390 does not flush tlb
* on each context switch
*/
static inline void flush_tlb(void)
{
local_flush_tlb();
}
static inline void flush_tlb_all(void)
{
local_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
local_flush_tlb();
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
local_flush_tlb();
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
local_flush_tlb();
}
#else
#include <asm/smp.h>
extern void smp_ptlb_all(void);
static inline void global_flush_tlb_csp(void)
{
int cs1=0,dum=0;
int *adr;
long long dummy=0;
adr = (int*) (((int)(((int*) &dummy)+1) & 0xfffffffc)|1);
__asm__ __volatile__("lr 2,%0\n\t"
"lr 3,%1\n\t"
"lr 4,%2\n\t"
"csp 2,4" :
: "d" (cs1), "d" (dum), "d" (adr)
: "2", "3", "4");
}
static inline void global_flush_tlb(void)
{
if (MACHINE_HAS_CSP)
global_flush_tlb_csp();
else
smp_ptlb_all();
}
/*
* We only have to do global flush of tlb if process run since last
* flush on any other pu than current.
* If we have threads (mm->count > 1) we always do a global flush,
* since the process runs on more than one processor at the same time.
*/
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
if ((smp_num_cpus > 1) &&
((atomic_read(&mm->mm_count) != 1) ||
(mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
mm->cpu_vm_mask = (1UL << smp_processor_id());
global_flush_tlb();
} else {
local_flush_tlb();
}
}
static inline void flush_tlb(void)
{
__flush_tlb_mm(current->mm);
}
static inline void flush_tlb_all(void)
{
global_flush_tlb();
}
static inline void flush_tlb_mm(struct mm_struct *mm)
{
__flush_tlb_mm(mm);
}
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
__flush_tlb_mm(vma->vm_mm);
}
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
__flush_tlb_mm(vma->vm_mm);
}
#endif
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* S/390 does not keep any page table caches in TLB */
}
static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/* No need to flush TLB; bits are in storage key */
return ptep_test_and_clear_young(ptep);
}
static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
/* No need to flush TLB; bits are in storage key */
return ptep_test_and_clear_dirty(ptep);
}
static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
......
......@@ -33,17 +33,6 @@
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
/* Caches aren't brain-dead on S390. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
/*
* The S390 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
......@@ -156,7 +145,8 @@ extern char empty_zero_page[PAGE_SIZE];
/* Bits in the page table entry */
#define _PAGE_PRESENT 0x001 /* Software */
#define _PAGE_MKCLEAR 0x002 /* Software */
#define _PAGE_MKCLEAN 0x002 /* Software */
#define _PAGE_ISCLEAN 0x004 /* Software */
#define _PAGE_RO 0x200 /* HW read-only */
#define _PAGE_INVALID 0x400 /* HW invalid */
......@@ -190,9 +180,11 @@ extern char empty_zero_page[PAGE_SIZE];
* No mapping available
*/
#define PAGE_INVALID __pgprot(_PAGE_INVALID)
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RO)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RO)
#define PAGE_NONE_SHARED __pgprot(_PAGE_PRESENT|_PAGE_INVALID)
#define PAGE_NONE_PRIVATE __pgprot(_PAGE_PRESENT|_PAGE_INVALID|_PAGE_ISCLEAN)
#define PAGE_RO_SHARED __pgprot(_PAGE_PRESENT|_PAGE_RO)
#define PAGE_RO_PRIVATE __pgprot(_PAGE_PRESENT|_PAGE_RO|_PAGE_ISCLEAN)
#define PAGE_COPY __pgprot(_PAGE_PRESENT|_PAGE_RO|_PAGE_ISCLEAN)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT)
......@@ -202,21 +194,21 @@ extern char empty_zero_page[PAGE_SIZE];
* the closest we can get..
*/
/*xwr*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P000 PAGE_NONE_PRIVATE
#define __P001 PAGE_RO_PRIVATE
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P100 PAGE_RO_PRIVATE
#define __P101 PAGE_RO_PRIVATE
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S000 PAGE_NONE_SHARED
#define __S001 PAGE_RO_SHARED
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S100 PAGE_RO_SHARED
#define __S101 PAGE_RO_SHARED
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
......@@ -227,10 +219,10 @@ extern char empty_zero_page[PAGE_SIZE];
*/
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
if ((pte_val(pteval) & (_PAGE_MKCLEAR|_PAGE_INVALID))
== _PAGE_MKCLEAR)
if ((pte_val(pteval) & (_PAGE_MKCLEAN|_PAGE_INVALID))
== _PAGE_MKCLEAN)
{
pte_val(pteval) &= ~_PAGE_MKCLEAR;
pte_val(pteval) &= ~_PAGE_MKCLEAN;
asm volatile ("sske %0,%1"
: : "d" (0), "a" (pte_val(pteval)));
......@@ -239,8 +231,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
*pteptr = pteval;
}
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
* pgd/pmd/pte query functions
*/
......@@ -277,6 +267,8 @@ extern inline int pte_dirty(pte_t pte)
{
int skey;
if (pte_val(pte) & _PAGE_ISCLEAN)
return 0;
asm volatile ("iske %0,%1" : "=d" (skey) : "a" (pte_val(pte)));
return skey & _PAGE_CHANGED;
}
......@@ -307,15 +299,14 @@ extern inline void pte_clear(pte_t *ptep)
pte_val(*ptep) = _PAGE_INVALID;
}
#define PTE_INIT(x) pte_clear(x)
/*
* The following pte modification functions only work if
* pte_present() is true. Undefined behaviour if not..
*/
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot);
pte_val(pte) &= PAGE_MASK | _PAGE_ISCLEAN;
pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_ISCLEAN;
return pte;
}
......@@ -342,13 +333,11 @@ extern inline pte_t pte_mkclean(pte_t pte)
extern inline pte_t pte_mkdirty(pte_t pte)
{
/* We can't set the changed bit atomically. For now we
* set (!) the page referenced bit. */
asm volatile ("sske %0,%1"
: : "d" (_PAGE_CHANGED|_PAGE_REFERENCED),
"a" (pte_val(pte)));
pte_val(pte) &= ~_PAGE_MKCLEAR;
/* We do not explicitly set the dirty bit because the
* sske instruction is slow. It is faster to let the
* next instruction set the dirty bit.
*/
pte_val(pte) &= ~(_PAGE_MKCLEAN | _PAGE_ISCLEAN);
return pte;
}
......@@ -382,6 +371,8 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
int skey;
if (pte_val(*ptep) & _PAGE_ISCLEAN)
return 0;
asm volatile ("iske %0,%1" : "=d" (skey) : "a" (*ptep));
if ((skey & _PAGE_CHANGED) == 0)
return 0;
......@@ -414,7 +405,7 @@ static inline void ptep_mkdirty(pte_t *ptep)
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*/
extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
static inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
{
pte_t __pte;
pte_val(__pte) = physpage + pgprot_val(pgprot);
......@@ -424,24 +415,41 @@ extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
#define mk_pte(pg, pgprot) \
({ \
struct page *__page = (pg); \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
pte_t __pte = mk_pte_phys(__physpage, (pgprot)); \
pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
\
if (__page != ZERO_PAGE(__physpage)) { \
int __users = page_count(__page); \
__users -= !!PagePrivate(__page) + !!__page->mapping; \
\
if (__users == 1) \
pte_val(__pte) |= _PAGE_MKCLEAR; \
if (!(pgprot_val(__pgprot) & _PAGE_ISCLEAN)) { \
int __users = !!PagePrivate(__page) + !!__page->mapping; \
if (__users + page_count(__page) == 1) \
pte_val(__pte) |= _PAGE_MKCLEAN; \
} \
__pte; \
})
#define pfn_pte(pfn, pgprot) \
({ \
struct page *__page = mem_map+(pfn); \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
\
if (!(pgprot_val(__pgprot) & _PAGE_ISCLEAN)) { \
int __users = !!PagePrivate(__page) + !!__page->mapping; \
if (__users + page_count(__page) == 1) \
pte_val(__pte) |= _PAGE_MKCLEAN; \
} \
__pte; \
})
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
#define pmd_page(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK)
/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
......@@ -457,8 +465,13 @@ extern inline pmd_t * pmd_offset(pgd_t * dir, unsigned long address)
}
/* Find an entry in the third-level page table.. */
#define pte_offset(pmd, address) \
((pte_t *) (pmd_page(*pmd) + ((address>>10) & ((PTRS_PER_PTE-1)<<2))))
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
((pte_t *) pmd_page_kernel(*(pmd)) + __pte_offset(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/*
* A page-table entry has some bits we have to treat in a special way.
......
......@@ -16,6 +16,7 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#ifdef __KERNEL__
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
......@@ -61,11 +62,8 @@ extern struct task_struct *last_task_used_math;
*/
#define TASK_UNMAPPED_BASE (TASK_SIZE / 2)
#define THREAD_SIZE (2*PAGE_SIZE)
typedef struct {
unsigned long seg;
unsigned long acc4;
__u32 ar4;
} mm_segment_t;
/* if you change the thread_struct structure, you must
......@@ -74,8 +72,6 @@ typedef struct {
struct thread_struct
{
struct pt_regs *regs; /* the user registers can be found on*/
s390_fp_regs fp_regs;
__u32 ar2; /* kernel access register 2 */
__u32 ar4; /* kernel access register 4 */
......@@ -84,8 +80,6 @@ struct thread_struct
__u32 error_code; /* error-code of last prog-excep. */
__u32 prot_addr; /* address of protection-excep. */
__u32 trap_no;
/* perform syscall argument validation (get/set_fs) */
mm_segment_t fs;
per_struct per_info;/* Must be aligned on an 4 byte boundary*/
/* Used to give failing instruction back to user for ieee exceptions */
addr_t ieee_instruction_pointer;
......@@ -95,14 +89,12 @@ struct thread_struct
typedef struct thread_struct thread_struct;
#define INIT_THREAD { (struct pt_regs *) 0, \
{ 0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
#define INIT_THREAD {{0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
{0},{0},{0},{0},{0},{0}}}, \
0, 0, \
sizeof(init_stack) + (__u32) &init_stack, \
(__pa((__u32) &swapper_pg_dir[0]) + _SEGMENT_TABLE),\
0,0,0, \
(mm_segment_t) { 0,1}, \
(per_struct) {{{{0,}}},0,0,0,0,{{0,}}}, \
0, 0 \
}
......@@ -115,6 +107,7 @@ typedef struct thread_struct thread_struct;
} while (0)
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
......@@ -126,28 +119,20 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define release_segments(mm) do { } while (0)
/*
* Return saved PC of a blocked thread. used in kernel/sched
* Return saved PC of a blocked thread.
*/
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
return (t->regs) ? ((unsigned long)t->regs->psw.addr) : 0;
}
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs->psw.addr)
#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
extern unsigned long thread_saved_pc(struct task_struct *t);
/* Allocation and freeing of basic task resources. */
/*
* NOTE! The task struct and the stack go together
* Print register of task into buffer. Used in fs/proc/array.c.
*/
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,1))
#define free_task_struct(p) free_pages((unsigned long)(p),1)
#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
extern char *task_show_regs(struct task_struct *task, char *buffer);
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
unsigned long get_wchan(struct task_struct *p);
#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
(((addr_t) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)) & -8L))
#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr)
#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15])
#define cpu_relax() do { } while (0)
......@@ -164,6 +149,46 @@ unsigned long get_wchan(struct task_struct *p);
#define USER_STD_MASK 0x00000080UL
#define PSW_PROBLEM_STATE 0x00010000UL
/*
* Set PSW mask to specified value, while leaving the
* PSW addr pointing to the next instruction.
*/
static inline void __load_psw_mask (unsigned long mask)
{
unsigned long addr;
psw_t psw;
psw.mask = mask;
asm volatile (
" basr %0,0\n"
"0: ahi %0,1f-0b\n"
" st %0,4(%1)\n"
" lpsw 0(%1)\n"
"1:"
: "=&d" (addr) : "a" (&psw) : "memory", "cc" );
}
/*
* Function to stop a processor until an interruption occured
*/
static inline void enabled_wait(void)
{
unsigned long reg;
psw_t wait_psw;
wait_psw.mask = 0x070e0000;
asm volatile (
" basr %0,0\n"
"0: la %0,1f-0b(%0)\n"
" st %0,4(%1)\n"
" oi 4(%1),0x80\n"
" lpsw 0(%1)\n"
"1:"
: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
}
/*
* Function to drop a processor into disabled wait state
*/
......@@ -200,4 +225,6 @@ static inline void disabled_wait(unsigned long code)
: : "a" (dw_psw), "a" (&ctl_buf) : "cc" );
}
#endif
#endif /* __ASM_S390_PROCESSOR_H */
......@@ -105,12 +105,16 @@
#define STACK_FRAME_OVERHEAD 96 /* size of minimum stack frame */
#define PTRACE_SETOPTIONS 21
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/current.h>
#include <asm/setup.h>
/* this typedef defines how a Program Status Word looks like */
......@@ -118,7 +122,7 @@ typedef struct
{
__u32 mask;
__u32 addr;
} psw_t __attribute__ ((aligned(8)));
} __attribute__ ((aligned(8))) psw_t;
#ifdef __KERNEL__
#define FIX_PSW(addr) ((unsigned long)(addr)|0x80000000UL)
......@@ -150,8 +154,8 @@ typedef struct
#define FPC_VALID_MASK 0xF8F8FF03
/*
* The first entries in pt_regs, gdb_pt_regs and user_regs_struct
* are common for all three structures. The s390_regs structure
* The first entries in pt_regs and user_regs_struct
* are common for the two structures. The s390_regs structure
* covers the common parts. It simplifies copying the common part
* between the three structures.
*/
......@@ -174,34 +178,15 @@ struct pt_regs
__u32 acrs[NUM_ACRS];
__u32 orig_gpr2;
__u32 trap;
__u32 old_ilc;
};
/*
* The gdb_pt_regs struct is used instead of the pt_regs structure
* if kernel remote debugging is used.
*/
#if CONFIG_REMOTE_DEBUG
struct gdb_pt_regs
{
psw_t psw;
__u32 gprs[NUM_GPRS];
__u32 acrs[NUM_ACRS];
__u32 orig_gpr2;
__u32 trap;
__u32 crs[16];
s390_fp_regs fp_regs;
__u32 old_ilc;
};
#endif
/*
* Now for the program event recording (trace) definitions.
*/
typedef struct
{
__u32 cr[3];
} per_cr_words __attribute__((packed));
} per_cr_words;
#define PER_EM_MASK 0xE8000000
......@@ -223,14 +208,14 @@ typedef struct
unsigned : 21;
addr_t starting_addr;
addr_t ending_addr;
} per_cr_bits __attribute__((packed));
} per_cr_bits;
typedef struct
{
__u16 perc_atmid; /* 0x096 */
__u32 address; /* 0x098 */
__u8 access_id; /* 0x0a1 */
} per_lowcore_words __attribute__((packed));
} per_lowcore_words;
typedef struct
{
......@@ -249,14 +234,14 @@ typedef struct
addr_t address; /* 0x098 */
unsigned : 4; /* 0x0a1 */
unsigned access_id : 4;
} per_lowcore_bits __attribute__((packed));
} per_lowcore_bits;
typedef struct
{
union {
per_cr_words words;
per_cr_bits bits;
} control_regs __attribute__((packed));
} control_regs;
/*
* Use these flags instead of setting em_instruction_fetch
* directly they are used so that single stepping can be
......@@ -275,7 +260,7 @@ typedef struct
per_lowcore_words words;
per_lowcore_bits bits;
} lowcore;
} per_struct __attribute__((packed));
} per_struct;
typedef struct
{
......@@ -294,6 +279,7 @@ typedef struct
#define PTRACE_PEEKDATA_AREA 0x5003
#define PTRACE_POKETEXT_AREA 0x5004
#define PTRACE_POKEDATA_AREA 0x5005
/*
* PT_PROT definition is loosely based on hppa bsd definition in
* gdb/hppab-nat.c
......@@ -345,7 +331,6 @@ struct user_regs_struct
#define user_mode(regs) (((regs)->psw.mask & PSW_PROBLEM_STATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr)
extern void show_regs(struct pt_regs * regs);
extern char *task_show_regs(struct task_struct *task, char *buffer);
#endif
#endif /* __ASSEMBLY__ */
......
/*
* linux/include/asm/qdio.h
*
* Linux for S/390 QDIO base support, Hipersocket base support
* version 2
*
* Copyright 2000,2002 IBM Corporation
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
*
*/
#ifndef __QDIO_H__
#define __QDIO_H__
#define VERSION_QDIO_H "$Revision: 1.57 $"
/* note, that most of the typedef's are from ingo. */
#include <linux/interrupt.h>
//#define QDIO_DBF_LIKE_HELL
#define QDIO_NAME "qdio "
#define QDIO_VERBOSE_LEVEL 9
#ifndef CONFIG_ARCH_S390X
#define QDIO_32_BIT
#endif /* CONFIG_ARCH_S390X */
#define QDIO_USE_PROCESSING_STATE
#ifdef CONFIG_QDIO_PERF_STATS
#define QDIO_PERFORMANCE_STATS
#endif /* CONFIG_QDIO_PERF_STATS */
#define QDIO_MINIMAL_BH_RELIEF_TIME 16
#define QDIO_TIMER_POLL_VALUE 1
#define IQDIO_TIMER_POLL_VALUE 1
/**** CONSTANTS, that are relied on without using these symbols *****/
#define QDIO_MAX_QUEUES_PER_IRQ 32 /* used in width of unsigned int */
/************************ END of CONSTANTS **************************/
#define QDIO_MAX_BUFFERS_PER_Q 128 /* must be a power of 2 (%x=&(x-1)*/
#define QDIO_BUF_ORDER 7 /* 2**this == number of pages used for sbals in 1 q */
#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
#define SBAL_SIZE 256
#define IQDIO_FILL_LEVEL_TO_POLL (QDIO_MAX_BUFFERS_PER_Q*4/3)
#define IQDIO_THININT_ISC 3
#define IQDIO_DELAY_TARGET 0
#define QDIO_BUSY_BIT_PATIENCE 2000 /* in microsecs */
#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
#define IQDIO_GLOBAL_LAPS_INT 1 /* dont global summary */
#define IQDIO_LOCAL_LAPS 4
#define IQDIO_LOCAL_LAPS_INT 1
#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
/*#define IQDIO_IQDC_INT_PARM 0x1234*/
#define QDIO_Q_LAPS 5
#define QDIO_STORAGE_KEY 0
#define L2_CACHELINE_SIZE 256
#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
#define QDIO_PERF "qdio_perf"
/* must be a power of 2 */
/*#define QDIO_STATS_NUMBER 4
#define QDIO_STATS_CLASSES 2
#define QDIO_STATS_COUNT_NEEDED 2*/
#define QDIO_NO_USE_COUNT_TIME 10
#define QDIO_NO_USE_COUNT_TIMEOUT 1000 /* wait for 1 sec on each q before
exiting without having use_count
of the queue to 0 */
#define QDIO_ESTABLISH_TIMEOUT 1000
#define QDIO_ACTIVATE_TIMEOUT 100
#define QDIO_CLEANUP_CLEAR_TIMEOUT 20000
#define QDIO_CLEANUP_HALT_TIMEOUT 10000
#define QDIO_BH AURORA_BH
#define QDIO_IRQ_BUCKETS 256 /* heavy..., but does only use a few bytes, but
be rather faster in cases of collisions
(if there really is a collision, it is
on every (traditional) interrupt and every
do_QDIO, so we rather are generous */
#define QDIO_QETH_QFMT 0
#define QDIO_ZFCP_QFMT 1
#define QDIO_IQDIO_QFMT 2
#define QDIO_IRQ_STATE_FRESH 0 /* must be 0 -> memset has set it to 0 */
#define QDIO_IRQ_STATE_INACTIVE 1
#define QDIO_IRQ_STATE_ESTABLISHED 2
#define QDIO_IRQ_STATE_ACTIVE 3
#define QDIO_IRQ_STATE_STOPPED 4
/* used as intparm in do_IO: */
#define QDIO_DOING_SENSEID 0
#define QDIO_DOING_ESTABLISH 1
#define QDIO_DOING_ACTIVATE 2
#define QDIO_DOING_CLEANUP 3
/************************* DEBUG FACILITY STUFF *********************/
/* define QDIO_DBF_LIKE_HELL to get maximum verbosity from the
 * debug feature areas registered below */
/* #define QDIO_DBF_LIKE_HELL */
/*
 * Log 'len' bytes at 'addr' into the s390 debug feature area
 * qdio_dbf_<name> at 'level'; non-zero 'ex' selects the exception
 * variant of the debug call.
 */
#define QDIO_DBF_HEX(ex,name,level,addr,len) \
	do { \
	if (ex) \
		debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
	else \
		debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
	} while (0)
/* same as QDIO_DBF_HEX, but logs a text string */
#define QDIO_DBF_TEXT(ex,name,level,text) \
	do { \
	if (ex) \
		debug_text_exception(qdio_dbf_##name,level,text); \
	else \
		debug_text_event(qdio_dbf_##name,level,text); \
	} while (0)
/* convenience wrappers fixing the level argument to 0..6 */
#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
/* NOTE(review): empty conditional block below is a no-op, kept as-is */
#ifdef QDIO_DBF_LIKE_HELL
#endif /* QDIO_DBF_LIKE_HELL */
/* dead code (compiled out): stubs that would disable the hex wrappers */
#if 0
#define QDIO_DBF_HEX0(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX1(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX2(ex,name,addr,len) do {} while (0)
#ifndef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
#endif /* QDIO_DBF_LIKE_HELL */
#endif /* 0 */
/* text variants at levels 0..6 */
#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
/* NOTE(review): empty conditional block below is a no-op, kept as-is */
#ifdef QDIO_DBF_LIKE_HELL
#endif /* QDIO_DBF_LIKE_HELL */
/* dead code (compiled out): stubs that would disable the text wrappers */
#if 0
#define QDIO_DBF_TEXT0(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT1(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT2(ex,name,text) do {} while (0)
#ifndef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
#endif /* QDIO_DBF_LIKE_HELL */
#endif /* 0 */
#define QDIO_DBF_SETUP_NAME "qdio_setup"
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_INDEX 2
#define QDIO_DBF_SETUP_NR_AREAS 1
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SETUP_LEVEL 6
#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SETUP_LEVEL 2
#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
#define QDIO_DBF_SBAL_LEN 256
#define QDIO_DBF_SBAL_INDEX 2
#define QDIO_DBF_SBAL_NR_AREAS 2
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SBAL_LEVEL 6
#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SBAL_LEVEL 2
#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TRACE_NAME "qdio_trace"
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_TRACE_INDEX 4
#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TRACE_INDEX 2
#define QDIO_DBF_TRACE_LEVEL 2
#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SENSE_NAME "qdio_sense"
#define QDIO_DBF_SENSE_LEN 64
#define QDIO_DBF_SENSE_INDEX 1
#define QDIO_DBF_SENSE_NR_AREAS 1
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SENSE_LEVEL 6
#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SENSE_LEVEL 2
#endif /* QDIO_DBF_LIKE_HELL */
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_OUT_INDEX 8
#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
#define QDIO_DBF_SLSB_OUT_LEVEL 6
#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_IN_INDEX 8
#define QDIO_DBF_SLSB_IN_NR_AREAS 1
#define QDIO_DBF_SLSB_IN_LEVEL 6
#endif /* QDIO_DBF_LIKE_HELL */
/****************** END OF DEBUG FACILITY STUFF *********************/
typedef struct qdio_buffer_element_t {
unsigned int flags;
unsigned int length;
#ifdef QDIO_32_BIT
void *reserved;
#endif /* QDIO_32_BIT */
void *addr;
} __attribute__ ((packed,aligned(16))) qdio_buffer_element_t;
typedef struct qdio_buffer_t {
volatile qdio_buffer_element_t element[16];
} __attribute__ ((packed,aligned(256))) qdio_buffer_t;
/* params are: irq, status, qdio_error, siga_error,
queue_number, first element processed, number of elements processed,
int_parm */
typedef void qdio_handler_t(int,unsigned int,unsigned int,unsigned int,
unsigned int,int,int,unsigned long);
#define QDIO_STATUS_INBOUND_INT 0x01
#define QDIO_STATUS_OUTBOUND_INT 0x02
#define QDIO_STATUS_LOOK_FOR_ERROR 0x04
#define QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR 0x08
#define QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR 0x10
#define QDIO_STATUS_ACTIVATE_CHECK_CONDITION 0x20
#define QDIO_SIGA_ERROR_ACCESS_EXCEPTION 0x10
#define QDIO_SIGA_ERROR_B_BIT_SET 0x20
/* for qdio_initialize */
#define QDIO_INBOUND_0COPY_SBALS 0x01
#define QDIO_OUTBOUND_0COPY_SBALS 0x02
#define QDIO_USE_OUTBOUND_PCIS 0x04
/* for qdio_cleanup */
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
/*
 * Everything a driver passes to qdio_initialize(): queue format and
 * counts, the QIB/SLIB parameter data, polling thresholds, the two
 * completion handlers and the SBAL address arrays.
 */
typedef struct qdio_initialize_t {
	int irq;
	unsigned char q_format; /* one of QDIO_*_QFMT */
	unsigned char adapter_name[8];
	unsigned int qib_param_field_format; /*adapter dependent*/
	/* pointer to 128 bytes or NULL, if no param field */
	unsigned char *qib_param_field; /* adapter dependent */
	/* pointer to no_queues*128 words of data or NULL */
	unsigned long *input_slib_elements;
	unsigned long *output_slib_elements;
	unsigned int min_input_threshold;
	unsigned int max_input_threshold;
	unsigned int min_output_threshold;
	unsigned int max_output_threshold;
	unsigned int no_input_qs;
	unsigned int no_output_qs;
	qdio_handler_t *input_handler;
	qdio_handler_t *output_handler;
	unsigned long int_parm; /* passed back to the handlers */
	unsigned long flags; /* QDIO_*_0COPY_SBALS, QDIO_USE_OUTBOUND_PCIS */
	void **input_sbal_addr_array; /* addr of n*128 void ptrs */
	void **output_sbal_addr_array; /* addr of n*128 void ptrs */
} qdio_initialize_t;
extern int qdio_initialize(qdio_initialize_t *init_data);
extern int qdio_activate(int irq,int flags);
/* bits returned by qdio_get_status() */
#define QDIO_STATE_MUST_USE_OUTB_PCI 0x00000001
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_initialize */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
extern unsigned long qdio_get_status(int irq);
/* flags for do_QDIO() and qdio_synchronize() */
#define QDIO_FLAG_SYNC_INPUT 0x01
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_UNDER_INTERRUPT 0x04
#define QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT 0x08 /* no effect on
						     adapter interrupts */
#define QDIO_FLAG_DONT_SIGA 0x10
extern int do_QDIO(int irq,unsigned int flags, unsigned int queue_number,
		   unsigned int qidx,unsigned int count,
		   qdio_buffer_t *buffers);
extern int qdio_synchronize(int irq,unsigned int flags,
			    unsigned int queue_number);
extern int qdio_cleanup(int irq,int how);
unsigned char qdio_get_slsb_state(int irq,unsigned int flag,
				  unsigned int queue_number,
				  unsigned int qidx);
extern void qdio_init_scrubber(void);
/*
 * QDIO device commands returned by extended Sense-ID
 */
#define DEFAULT_ESTABLISH_QS_CMD 0x1b
#define DEFAULT_ESTABLISH_QS_COUNT 0x1000
#define DEFAULT_ACTIVATE_QS_CMD 0x1f
#define DEFAULT_ACTIVATE_QS_COUNT 0
/* CCW command codes and byte counts as delivered by extended Sense-ID */
typedef struct _qdio_cmds {
	unsigned char rcd; /* read configuration data */
	unsigned short count_rcd;
	unsigned char sii; /* set interface identifier */
	unsigned short count_sii;
	unsigned char rni; /* read node identifier */
	unsigned short count_rni;
	unsigned char eq; /* establish QDIO queues */
	unsigned short count_eq;
	unsigned char aq; /* activate QDIO queues */
	unsigned short count_aq;
} qdio_cmds_t;
/*
 * additional CIWs returned by extended Sense-ID
 */
#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
/* queue descriptor, format 0 (one per queue, embedded in the QDR) */
typedef struct _qdesfmt0 {
#ifdef QDIO_32_BIT
	unsigned long res1; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long sliba; /* storage-list-information-block
				address */
#ifdef QDIO_32_BIT
	unsigned long res2; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long sla; /* storage-list address */
#ifdef QDIO_32_BIT
	unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long slsba; /* storage-list-state-block address */
	unsigned int res4; /* reserved */
	unsigned int akey : 4; /* access key for DLIB */
	unsigned int bkey : 4; /* access key for SL */
	unsigned int ckey : 4; /* access key for SBALs */
	unsigned int dkey : 4; /* access key for SLSB */
	unsigned int res5 : 16; /* reserved */
} __attribute__ ((packed)) qdesfmt0_t;
/*
 * Queue-Description record (QDR)
 */
typedef struct _qdr {
	unsigned int qfmt : 8; /* queue format */
	unsigned int pfmt : 8; /* impl. dep. parameter format */
	unsigned int res1 : 8; /* reserved */
	unsigned int ac : 8; /* adapter characteristics */
	unsigned int res2 : 8; /* reserved */
	unsigned int iqdcnt : 8; /* input-queue-descriptor count */
	unsigned int res3 : 8; /* reserved */
	unsigned int oqdcnt : 8; /* output-queue-descriptor count */
	unsigned int res4 : 8; /* reserved */
	unsigned int iqdsz : 8; /* input-queue-descriptor size */
	unsigned int res5 : 8; /* reserved */
	unsigned int oqdsz : 8; /* output-queue-descriptor size */
	unsigned int res6[9]; /* reserved */
#ifdef QDIO_32_BIT
	unsigned long res7; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long qiba; /* queue-information-block address */
	unsigned int res8; /* reserved */
	unsigned int qkey : 4; /* queue-information-block key */
	unsigned int res9 : 28; /* reserved */
	/* union _qd {*/ /* why this? */
	qdesfmt0_t qdf0[126]; /* input descriptors, then output ones */
	/* } qd;*/
} __attribute__ ((packed,aligned(4096))) qdr_t;
/*
 * queue information block (QIB)
 */
#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80
#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
typedef struct _qib {
	unsigned int qfmt : 8; /* queue format */
	unsigned int pfmt : 8; /* impl. dep. parameter format */
	unsigned int res1 : 8; /* reserved */
	unsigned int ac : 8; /* adapter characteristics */
	unsigned int res2; /* reserved */
#ifdef QDIO_32_BIT
	unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long isliba; /* absolute address of 1st
				 input SLIB */
#ifdef QDIO_32_BIT
	unsigned long res4; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long osliba; /* absolute address of 1st
				 output SLIB */
	unsigned int res5; /* reserved */
	unsigned int res6; /* reserved */
	unsigned char ebcnam[8]; /* adapter identifier in EBCDIC */
	unsigned char res7[88]; /* reserved */
	unsigned char parm[QDIO_MAX_BUFFERS_PER_Q];
				/* implementation dependent
				   parameters */
} __attribute__ ((packed,aligned(256))) qib_t;
/*
 * storage-list-information block element (SLIBE)
 */
typedef struct _slibe {
#ifdef QDIO_32_BIT
	unsigned long res; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long parms; /* implementation dependent
				parameters */
} slibe_t;
/*
 * storage-list-information block (SLIB)
 */
typedef struct _slib {
#ifdef QDIO_32_BIT
	unsigned long res1; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long nsliba; /* next SLIB address (if any) */
#ifdef QDIO_32_BIT
	unsigned long res2; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long sla; /* SL address */
#ifdef QDIO_32_BIT
	unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long slsba; /* SLSB address */
	unsigned char res4[1000]; /* reserved */
	slibe_t slibe[QDIO_MAX_BUFFERS_PER_Q]; /* SLIB elements */
} __attribute__ ((packed,aligned(2048))) slib_t;
/* decoded view of the SBALE flags byte */
typedef struct _sbal_flags {
	unsigned char res1 : 1; /* reserved */
	unsigned char last : 1; /* last entry */
	unsigned char cont : 1; /* contiguous storage */
	unsigned char res2 : 1; /* reserved */
	unsigned char frag : 2; /* fragmentation (s.below) */
	unsigned char res3 : 2; /* reserved */
} __attribute__ ((packed)) sbal_flags_t;
/* fragmentation / entry bits in the SBALE flags word */
#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL
/* Awesome OpenFCP extensions */
#define SBAL_FLAGS0_TYPE_STATUS 0x00UL
#define SBAL_FLAGS0_TYPE_WRITE 0x08UL
#define SBAL_FLAGS0_TYPE_READ 0x10UL
#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL
#define SBAL_FLAGS0_MORE_SBALS 0x04UL
#define SBAL_FLAGS0_COMMAND 0x02UL
#define SBAL_FLAGS0_LAST_SBAL 0x00UL
#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND
#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS
/* parenthesized: the bare "a | b" expansion would bind wrongly when the
 * macro is used inside a larger expression (operator precedence) */
#define SBAL_FLAGS0_FIRST_SBAL (SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND)
/* Naught of interest beyond this point */
#define SBAL_FLAGS0_PCI 0x40
/* sbalf byte, format 0: PCI / continuation / OpenFCP storage-block type */
typedef struct _sbal_sbalf_0 {
	unsigned char res1 : 1; /* reserved */
	unsigned char pci : 1; /* PCI indicator */
	unsigned char cont : 1; /* data continuation */
	unsigned char sbtype: 2; /* storage-block type (OpenFCP) */
	unsigned char res2 : 3; /* reserved */
} __attribute__ ((packed)) sbal_sbalf_0_t;
/* sbalf byte, format 1: storage key */
typedef struct _sbal_sbalf_1 {
	unsigned char res1 : 4; /* reserved */
	unsigned char key : 4; /* storage key */
} __attribute__ ((packed)) sbal_sbalf_1_t;
/* sbalf byte, format 14: error index */
typedef struct _sbal_sbalf_14 {
	unsigned char res1 : 4; /* reserved */
	unsigned char erridx : 4; /* error index */
} __attribute__ ((packed)) sbal_sbalf_14_t;
/* sbalf byte, format 15: reason code */
typedef struct _sbal_sbalf_15 {
	unsigned char reason; /* reserved */
} __attribute__ ((packed)) sbal_sbalf_15_t;
/* the sbalf byte, accessible decoded per format or as a raw value */
typedef union _sbal_sbalf {
	sbal_sbalf_0_t i0;
	sbal_sbalf_1_t i1;
	sbal_sbalf_14_t i14;
	sbal_sbalf_15_t i15;
	unsigned char value;
} sbal_sbalf_t;
/* one storage-block access-list entry (SBALE) */
typedef struct _sbale {
	union {
		sbal_flags_t bits; /* flags */
		unsigned char value;
	} flags;
	unsigned int res1 : 16; /* reserved */
	sbal_sbalf_t sbalf; /* SBAL flags */
	unsigned int res2 : 16; /* reserved */
	unsigned int count : 16; /* data count */
#ifdef QDIO_32_BIT
	unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long addr; /* absolute data address */
} __attribute__ ((packed,aligned(16))) sbal_element_t;
/*
 * storage-block access-list (SBAL)
 */
typedef struct _sbal {
	sbal_element_t element[QDIO_MAX_ELEMENTS_PER_BUFFER];
} __attribute__ ((packed,aligned(256))) sbal_t;
/*
 * storage-list (SL): one absolute SBAL address per buffer
 */
typedef struct _sl_element {
#ifdef QDIO_32_BIT
	unsigned long res; /* reserved */
#endif /* QDIO_32_BIT */
	unsigned long sbal; /* absolute SBAL address */
} __attribute__ ((packed)) sl_element_t;
typedef struct _sl {
	sl_element_t element[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed,aligned(1024))) sl_t;
/*
 * storage-list-state block (SLSB): one state byte per buffer
 */
/*typedef struct _slsb_val {*/
/* unsigned char value; */ /* SLSB entry as a single byte value */
/*} __attribute__ ((packed)) slsb_val_t;*/
/* decoded view of one SLSB state byte */
typedef struct _slsb_flags {
	unsigned char owner : 2; /* SBAL owner */
	unsigned char type : 1; /* buffer type */
	unsigned char state : 5; /* processing state */
} __attribute__ ((packed)) slsb_flags_t;
typedef struct _slsb {
	union _acc {
		unsigned char val[QDIO_MAX_BUFFERS_PER_Q];
		slsb_flags_t flags[QDIO_MAX_BUFFERS_PER_Q];
	} acc;
} __attribute__ ((packed,aligned(256))) slsb_t;
/*
 * SLSB values
 */
#define SLSB_OWNER_PROG 1
#define SLSB_OWNER_CU 2
#define SLSB_TYPE_INPUT 0
#define SLSB_TYPE_OUTPUT 1
#define SLSB_STATE_NOT_INIT 0
#define SLSB_STATE_EMPTY 1
#define SLSB_STATE_PRIMED 2
#define SLSB_STATE_HALTED 0xe
#define SLSB_STATE_ERROR 0xf
/* combined owner/type/state byte values as found in slsb.acc.val[] */
#define SLSB_P_INPUT_NOT_INIT 0x80
#define SLSB_P_INPUT_PROCESSING 0x81
#define SLSB_CU_INPUT_EMPTY 0x41
#define SLSB_P_INPUT_PRIMED 0x82
#define SLSB_P_INPUT_HALTED 0x8E
#define SLSB_P_INPUT_ERROR 0x8F
#define SLSB_P_OUTPUT_NOT_INIT 0xA0
#define SLSB_P_OUTPUT_EMPTY 0xA1
#define SLSB_CU_OUTPUT_PRIMED 0x62
#define SLSB_P_OUTPUT_HALTED 0xAE
#define SLSB_P_OUTPUT_ERROR 0xAF
#define SLSB_ERROR_DURING_LOOKUP 0xFF
/*
 * path management control word (extended layout)
 */
typedef struct {
	unsigned long intparm; /* interruption parameter */
	unsigned int qf : 1; /* qdio facility */
	unsigned int res0 : 1; /* reserved */
	unsigned int isc : 3; /* interruption subclass */
	unsigned int res5 : 3; /* reserved zeros */
	unsigned int ena : 1; /* enabled */
	unsigned int lm : 2; /* limit mode */
	unsigned int mme : 2; /* measurement-mode enable */
	unsigned int mp : 1; /* multipath mode */
	unsigned int tf : 1; /* timing facility */
	unsigned int dnv : 1; /* device number valid */
	unsigned int dev : 16; /* device number */
	unsigned char lpm; /* logical path mask */
	unsigned char pnom; /* path not operational mask */
	unsigned char lpum; /* last path used mask */
	unsigned char pim; /* path installed mask */
	unsigned short mbi; /* measurement-block index */
	unsigned char pom; /* path operational mask */
	unsigned char pam; /* path available mask */
	unsigned char chpid[8]; /* CHPID 0-7 (if available) */
	unsigned int res1 : 8; /* reserved */
	unsigned int st : 3; /* subchannel type */
	unsigned int res2 : 20; /* reserved */
	unsigned int csense : 1; /* concurrent sense; can be enabled
				    per MSCH, however, if facility
				    is not installed, this results
				    in an operand exception. */
} pmcw_e_t;
/*
 * subchannel status word (extended layout)
 */
typedef struct {
	unsigned int key : 4; /* subchannel key */
	unsigned int sctl : 1; /* suspend control */
	unsigned int eswf : 1; /* ESW format */
	unsigned int cc : 2; /* deferred condition code */
	unsigned int fmt : 1; /* format */
	unsigned int pfch : 1; /* prefetch */
	unsigned int isic : 1; /* initial-status interruption control */
	unsigned int alcc : 1; /* address-limit checking control */
	unsigned int ssi : 1; /* suppress-suspended interruption */
	unsigned int zcc : 1; /* zero condition code */
	unsigned int ectl : 1; /* extended control */
	unsigned int pno : 1; /* path not operational */
	unsigned int qact : 1; /* qdio active */
	unsigned int fctl : 3; /* function control */
	unsigned int actl : 7; /* activity control */
	unsigned int stctl : 5; /* status control */
	unsigned long cpa; /* channel program address */
	unsigned int dstat : 8; /* device status */
	unsigned int cstat : 8; /* subchannel status */
	unsigned int count : 16; /* residual count */
} scsw_e_t;
/*
 * Per-queue state of one qdio queue (input or output): the buffer
 * state array (slsb), SIGA requirements, and the bookkeeping for the
 * polling/tasklet machinery.  256 byte aligned (slsb is first).
 */
typedef struct qdio_q_t {
	volatile slsb_t slsb; /* hardware-visible buffer states */
	__u32 * volatile dev_st_chg_ind;
	int is_input_q;
	int is_0copy_sbals_q;
	int irq;
	unsigned int is_iqdio_q;
	/* bit 0 means queue 0, bit 1 means queue 1, ... */
	unsigned int mask;
	unsigned int q_no;
	qdio_handler_t (*handler); /* driver's completion callback */
	/* points to the next buffer to be checked for having
	 * been processed by the card (outbound)
	 * or to the next buffer the program should check for (inbound) */
	volatile int first_to_check;
	/* and the last time it was: */
	volatile int last_move_ftc;
	atomic_t number_of_buffers_used;
	atomic_t polling;
	/* which SIGA instructions this queue needs (cf. CHSC_FLAG_SIGA_*) */
	unsigned int siga_in;
	unsigned int siga_out;
	unsigned int siga_sync;
	unsigned int siga_sync_done_on_thinints;
	unsigned int hydra_gives_outbound_pcis;
	/* used to save beginning position when calling dd_handlers */
	int first_element_to_kick;
	atomic_t use_count; /* users currently inside qdio code for this q */
	atomic_t is_in_shutdown;
#ifdef QDIO_USE_TIMERS_FOR_POLLING
	struct timer_list timer;
	atomic_t timer_already_set;
	spinlock_t timer_lock;
#else /* QDIO_USE_TIMERS_FOR_POLLING */
	struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */
	unsigned int state; /* QDIO_IRQ_STATE_* */
	/* used to store the error condition during a data transfer */
	unsigned int qdio_error;
	unsigned int siga_error;
	unsigned int error_status_flags;
	/* list of interesting queues */
	volatile struct qdio_q_t *list_next;
	volatile struct qdio_q_t *list_prev;
	slib_t *slib; /* a page is allocated under this pointer,
			 sl points into this page, offset PAGE_SIZE/2
			 (after slib) */
	sl_t *sl;
	volatile sbal_t *sbal[QDIO_MAX_BUFFERS_PER_Q];
	qdio_buffer_t *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q];
	unsigned long int_parm;
	/*struct {
		int in_bh_check_limit;
		int threshold;
	} threshold_classes[QDIO_STATS_CLASSES];*/
	struct {
		/* inbound: the time to stop polling
		   outbound: the time to kick peer */
		int threshold; /* the real value */
		/* outbound: last time of do_QDIO
		   inbound: last time of noticing incoming data */
		/*__u64 last_transfer_times[QDIO_STATS_NUMBER];
		int last_transfer_index; */
		__u64 last_transfer_time;
	} timing;
	unsigned int queue_type; /* QDIO_*_QFMT */
} __attribute__ ((aligned(256))) qdio_q_t;
/*
 * Per-subchannel qdio state: all input/output queues, the setup lock,
 * the CCW used for establish/activate and the result of the last I/O.
 */
typedef struct qdio_irq_t {
	__u32 * volatile dev_st_chg_ind;
	unsigned long int_parm;
	int irq;
	unsigned int is_iqdio_irq;
	unsigned int hydra_gives_outbound_pcis;
	unsigned int sync_done_on_outb_pcis;
	unsigned int state; /* QDIO_IRQ_STATE_* */
	spinlock_t setting_up_lock; /* serializes establish/cleanup */
	unsigned int no_input_qs;
	unsigned int no_output_qs;
	unsigned char qdioac; /* cf. qdioac in store_qdio_data_response */
	qdio_q_t *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
	qdio_q_t *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
	ccw1_t ccw;
	/* outcome of the last do_IO on this subchannel */
	int io_result_cstat;
	int io_result_dstat;
	int io_result_flags;
	atomic_t interrupt_has_arrived;
	atomic_t interrupt_has_been_cleaned;
	wait_queue_head_t wait_q;
	qdr_t *qdr;
	qdio_cmds_t commands; /* from extended Sense-ID */
	qib_t qib;
	io_handler_func_t original_int_handler;
	struct qdio_irq_t *next; /* next entry in lookup list */
} qdio_irq_t;
#define QDIO_CHSC_RESPONSE_CODE_OK 1
/* flags for st qdio sch data */
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
/* qdioac bits: which SIGA instructions the subchannel requires */
#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40
#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20
#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
/*
 * Page-sized CHSC command area: the request block (with a union of the
 * per-operation data layouts) followed by the response block.
 */
typedef struct qdio_chsc_area_t {
	struct {
		/* word 0 */
		__u16 command_code1;
		__u16 command_code2;
		/* word 1 */
		__u16 operation_code;
		__u16 first_sch;
		/* word 2 */
		__u8 reserved1;
		__u8 image_id;
		__u16 last_sch;
		/* word 3 */
		__u32 reserved2;
		/* word 4 */
		union {
			struct {
				/* word 4&5 */
				__u64 summary_indicator_addr;
				/* word 6&7 */
				__u64 subchannel_indicator_addr;
				/* word 8 */
				int ks:4;
				int kc:4;
				int reserved1:21;
				int isc:3;
				/* word 9&10 */
				__u32 reserved2[2];
				/* word 11 */
				__u32 subsystem_id;
				/* word 12-1015 */
				__u32 reserved3[1004];
			} __attribute__ ((packed,aligned(4))) set_chsc;
			struct {
				/* word 4&5 */
				__u32 reserved1[2];
				/* word 6 */
				__u32 delay_target;
				/* word 7-1015 */
				__u32 reserved4[1009];
			} __attribute__ ((packed,aligned(4))) set_chsc_fast;
			struct {
				/* word 0 */
				__u16 length;
				__u16 response_code;
				/* word 1 */
				__u32 reserved1;
				/* words 2 to 9 for st sch qdio data */
				__u8 flags;
				__u8 reserved2;
				__u16 sch;
				__u8 qfmt;
				__u8 reserved3;
				__u8 qdioac;
				__u8 sch_class;
				__u8 reserved4;
				__u8 icnt;
				__u8 reserved5;
				__u8 ocnt;
				/* plus 5 words of reserved fields */
			} __attribute__ ((packed,aligned(8)))
			store_qdio_data_response;
		} operation_data_area;
	} __attribute__ ((packed,aligned(8))) request_block;
	struct {
		/* word 0 */
		__u16 length;
		__u16 response_code;
		/* word 1 */
		__u32 reserved1;
	} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE))) qdio_chsc_area_t;
/* prefix for all qdio printk output */
#define QDIO_PRINTK_HEADER QDIO_NAME ": "
/* printk wrappers, compiled in/out by severity vs. QDIO_VERBOSE_LEVEL */
#if QDIO_VERBOSE_LEVEL>8
#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_STUPID(x...)
#endif
#if QDIO_VERBOSE_LEVEL>7
#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALL(x...)
#endif
#if QDIO_VERBOSE_LEVEL>6
#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_INFO(x...)
#endif
#if QDIO_VERBOSE_LEVEL>5
#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_WARN(x...)
#endif
#if QDIO_VERBOSE_LEVEL>4
#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ERR(x...)
#endif
#if QDIO_VERBOSE_LEVEL>3
#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_CRIT(x...)
#endif
#if QDIO_VERBOSE_LEVEL>2
#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALERT(x...)
#endif
#if QDIO_VERBOSE_LEVEL>1
#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_EMERG(x...)
#endif
#endif /* __QDIO_H__ */
#ifndef _S390_RWSEM_H
#define _S390_RWSEM_H
/*
* include/asm-s390/rwsem.h
*
* S390 version
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
*/
/*
*
* The MSW of the count is the negated number of active writers and waiting
* lockers, and the LSW is the total number of active locks
*
* The lock count is initialized to 0 (no active and no waiting lockers).
*
* When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
* uncontended lock. This can be determined because XADD returns the old value.
* Readers increment by 1 and see a positive value when uncontended, negative
* if there are writers (and maybe) readers waiting (in which case it goes to
* sleep).
*
* The value of WAITING_BIAS supports up to 32766 waiting processes. This can
* be extended to 65534 by manually checking the whole MSW rather than relying
* on the S flag.
*
* The value of ACTIVE_BIAS supports up to 65535 active processes.
*
* This should be totally fair - if anything is waiting, a process that wants a
* lock will go to the back of the queue. When the currently active lock is
* released, if there's a writer at the front of the queue, then that and only
 * that will be woken up; if there's a bunch of consecutive readers at the
* front, then they'll all be woken up, but no other readers will be.
*/
#ifndef _LINUX_RWSEM_H
#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
#endif
#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
struct rwsem_waiter;
/* out-of-line slow paths, called by the inline fast paths below */
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
/*
 * the semaphore definition
 */
struct rw_semaphore {
	signed long count; /* see the bias constants below */
	spinlock_t wait_lock; /* protects wait_list */
	struct list_head wait_list;
};
/* count encoding: LSW = active locks, MSW = -(writers + waiters) */
#define RWSEM_UNLOCKED_VALUE 0x00000000
#define RWSEM_ACTIVE_BIAS 0x00000001
#define RWSEM_ACTIVE_MASK 0x0000ffff
#define RWSEM_WAITING_BIAS (-0x00010000)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
 * initialisation
 */
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)
/*
 * Run-time counterpart of __RWSEM_INITIALIZER(): put *sem into the
 * unlocked state with an empty waiter list.
 */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	sem->count = RWSEM_UNLOCKED_VALUE;
}
/*
* lock for reading
*/
/*
 * Atomically add RWSEM_ACTIVE_READ_BIAS to sem->count with a
 * load + compare-and-swap (CS) retry loop.  If the count was
 * negative before the add, a writer is active or waiting, so
 * fall back to the out-of-line slow path.
 */
static inline void __down_read(struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" ahi %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory" );
	/* old < 0: writer(s) present -> sleep in the slow path */
	if (old < 0)
		rwsem_down_read_failed(sem);
}
/*
* lock for writing
*/
/*
 * Atomically add RWSEM_ACTIVE_WRITE_BIAS to sem->count via a CS loop.
 * The bias (-0xffff) does not fit ahi's 16-bit immediate, so it is
 * kept in a local and added with 'a' (add from storage) instead.
 * A non-zero previous count means readers or another writer hold or
 * wait for the lock -> slow path.
 */
static inline void __down_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;
	tmp = RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" a %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "m" (tmp)
		: "cc", "memory" );
	/* old != 0: uncontended acquire requires a previous count of 0 */
	if (old != 0)
		rwsem_down_write_failed(sem);
}
/*
* unlock after reading
*/
/*
 * Atomically subtract RWSEM_ACTIVE_READ_BIAS from sem->count (ahi with
 * a negated immediate) via a CS loop.  If the result is negative with
 * no active lockers left, waiters exist and must be woken.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" ahi %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
		: "cc", "memory" );
	/* negative count + empty active mask -> wake up queued waiters */
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}
/*
* unlock after writing
*/
/*
 * Atomically subtract RWSEM_ACTIVE_WRITE_BIAS from sem->count via a CS
 * loop; as in __down_write() the (negated) bias is added from storage
 * because it does not fit a 16-bit immediate.  Wake waiters if the
 * resulting count is negative with no active lockers remaining.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
	signed long old, new, tmp;
	tmp = -RWSEM_ACTIVE_WRITE_BIAS;
	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" a %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "m" (tmp)
		: "cc", "memory" );
	/* negative count + empty active mask -> wake up queued waiters */
	if (new < 0)
		if ((new & RWSEM_ACTIVE_MASK) == 0)
			rwsem_wake(sem);
}
/*
* implement atomic add functionality
*/
/*
 * Atomically add 'delta' to sem->count using a load + CS retry loop;
 * the result is discarded (cf. rwsem_atomic_update() below).
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" ar %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "d" (delta)
		: "cc", "memory" );
}
/*
* implement exchange and add functionality
*/
/*
 * Atomically add 'delta' to sem->count (load + CS retry loop) and
 * return the new count value.
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
	signed long old, new;
	__asm__ __volatile__(
		" l %0,0(%2)\n"
		"0: lr %1,%0\n"
		" ar %1,%3\n"
		" cs %0,%1,0(%2)\n"
		" jl 0b"
		: "=&d" (old), "=&d" (new)
		: "a" (&sem->count), "d" (delta)
		: "cc", "memory" );
	return new;
}
#endif /* __KERNEL__ */
#endif /* _S390_RWSEM_H */
/*
 * include/asm-s390/s390-gdbregs.h
 *
 * S390 version
 * Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
 * Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
 *
 * used both by the linux kernel for remote debugging & gdb
 */
#ifndef _S390_GDBREGS_H
#define _S390_GDBREGS_H
#ifdef __KERNEL__
#include <asm/s390-regs-common.h>
#else
#include <s390/s390-regs-common.h>
#endif
/* longest s390 instruction is 6 bytes */
#define S390_MAX_INSTR_SIZE 6
/* gdb register numbering: psw mask+addr, then gprs, acrs, crs, fpc, fprs */
#define NUM_REGS (2+NUM_GPRS+NUM_ACRS+NUM_CRS+1+NUM_FPRS)
#define FIRST_ACR (2+NUM_GPRS)
#define LAST_ACR (FIRST_ACR+NUM_ACRS-1)
#define FIRST_CR (FIRST_ACR+NUM_ACRS)
#define LAST_CR (FIRST_CR+NUM_CRS-1)
#define PSWM_REGNUM 0
#define PC_REGNUM 1
#define GP0_REGNUM 2 /* GPR register 0 */
#define GP_LAST_REGNUM (GP0_REGNUM+NUM_GPRS-1)
#define RETADDR_REGNUM (GP0_REGNUM+14) /* Usually return address */
#define SP_REGNUM (GP0_REGNUM+15) /* Contains address of top of stack */
#define FP_REGNUM SP_REGNUM /* needed in findvar.c still */
#define FRAME_REGNUM (GP0_REGNUM+11)
#define FPC_REGNUM (GP0_REGNUM+NUM_GPRS+NUM_ACRS+NUM_CRS)
#define FP0_REGNUM (FPC_REGNUM+1) /* FPR (Floating point) register 0 */
#define FPLAST_REGNUM (FP0_REGNUM+NUM_FPRS-1) /* Last floating point register */
/* The top of this structure is as similar as possible to a pt_regs structure to */
/* simplify code */
typedef struct
{
	S390_REGS_COMMON
	__u32 crs[NUM_CRS]; /* control registers */
	s390_fp_regs fp_regs; /* fpc + floating point registers */
} s390_gdb_regs __attribute__((packed));
/* register names in gdb numbering order (cf. NUM_REGS above) */
#define REGISTER_NAMES \
{ \
"pswm","pswa", \
"gpr0","gpr1","gpr2","gpr3","gpr4","gpr5","gpr6","gpr7", \
"gpr8","gpr9","gpr10","gpr11","gpr12","gpr13","gpr14","gpr15", \
"acr0","acr1","acr2","acr3","acr4","acr5","acr6","acr7", \
"acr8","acr9","acr10","acr11","acr12","acr13","acr14","acr15", \
"cr0","cr1","cr2","cr3","cr4","cr5","cr6","cr7", \
"cr8","cr9","cr10","cr11","cr12","cr13","cr14","cr15", \
"fpc", \
"fpr0","fpr1","fpr2","fpr3","fpr4","fpr5","fpr6","fpr7", \
"fpr8","fpr9","fpr10","fpr11","fpr12","fpr13","fpr14","fpr15" \
}
/* Index within `registers' of the first byte of the space for
   register N. */
/*
 * Byte offset of fpr0 in the register buffer: psw (mask+addr), then
 * the GPR, ACR and CR banks, then the FPC word plus its pad.
 * Fix: the ACR term read "(ACR_SIZE+NUM_ACRS)", adding the count to
 * the size instead of multiplying; that disagreed with the parallel
 * GPR/CR terms and with REGISTER_BYTE()'s 4-bytes-per-register rule.
 */
#define FP0_OFFSET ((PSW_MASK_SIZE+PSW_ADDR_SIZE)+ \
(GPR_SIZE*NUM_GPRS)+(ACR_SIZE*NUM_ACRS)+ \
(CR_SIZE*NUM_CRS)+(FPC_SIZE+FPC_PAD_SIZE))
/* total size in bytes of the gdb register buffer */
#define REGISTER_BYTES \
((FP0_OFFSET)+(FPR_SIZE*NUM_FPRS))
/* byte offset of register N: 4 bytes per non-FP register, FPR_SIZE per FPR */
#define REGISTER_BYTE(N) ((N) < FP0_REGNUM ? (N)*4:(FP0_OFFSET+((N)-FP0_REGNUM)*FPR_SIZE))
#endif
......@@ -25,6 +25,10 @@ typedef struct ext_int_info_t {
extern ext_int_info_t *ext_int_hash[];
int register_external_interrupt(__u16 code, ext_int_handler_t handler);
int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *info);
int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *info);
#endif
......@@ -18,10 +18,13 @@
typedef struct _ioinfo {
unsigned int irq; /* aka. subchannel number */
spinlock_t irq_lock; /* irq lock */
void *private_data; /* pointer to private data */
struct _ioinfo *prev;
struct _ioinfo *next;
__u8 st; /* subchannel type */
union {
unsigned int info;
struct {
......@@ -50,10 +53,9 @@ typedef struct _ioinfo {
unsigned int esid : 1; /* Ext. SenseID supported by HW */
unsigned int rcd : 1; /* RCD supported by HW */
unsigned int repnone : 1; /* don't call IRQ handler on interrupt */
unsigned int newreq : 1; /* new register interface */
unsigned int dval : 1; /* device number valid */
unsigned int unknown : 1; /* unknown device - if SenseID failed */
unsigned int unused : (sizeof(unsigned int)*8 - 24); /* unused */
unsigned int unused : (sizeof(unsigned int)*8 - 23); /* unused */
} __attribute__ ((packed)) flags;
} ui;
......@@ -75,6 +77,7 @@ typedef struct _ioinfo {
unsigned long qintparm; /* queued interruption parameter */
unsigned long qflag; /* queued flags */
__u8 qlpm; /* queued logical path mask */
ssd_info_t ssd_info; /* subchannel description */
} __attribute__ ((aligned(8))) ioinfo_t;
......@@ -89,6 +92,12 @@ typedef struct _ioinfo {
#define IOINFO_FLAGS_REPALL 0x00800000
extern ioinfo_t *ioinfo[];
int s390_set_private_data(int irq, void * data);
void * s390_get_private_data(int irq);
#define CHSC_SEI_ACC_CHPID 1
#define CHSC_SEI_ACC_LINKADDR 2
#define CHSC_SEI_ACC_FULLLINKADDR 3
#endif /* __s390io_h */
......@@ -13,10 +13,23 @@
#include <asm/types.h>
typedef struct _mci {
__u32 to_be_defined_1 : 9;
__u32 cp : 1; /* channel-report pending */
__u32 to_be_defined_2 : 22;
__u32 to_be_defined_3;
__u32 sd : 1; /* 00 system damage */
__u32 pd : 1; /* 01 instruction-processing damage */
__u32 sr : 1; /* 02 system recovery */
__u32 to_be_defined_1 : 4; /* 03-06 */
__u32 dg : 1; /* 07 degradation */
__u32 w : 1; /* 08 warning pending */
__u32 cp : 1; /* 09 channel-report pending */
__u32 to_be_defined_2 : 6; /* 10-15 */
__u32 se : 1; /* 16 storage error uncorrected */
__u32 sc : 1; /* 17 storage error corrected */
__u32 ke : 1; /* 18 storage-key error uncorrected */
__u32 ds : 1; /* 19 storage degradation */
__u32 to_be_defined_3 : 4; /* 20-23 */
__u32 fa : 1; /* 24 failing storage address validity */
__u32 to_be_defined_4 : 7; /* 25-31 */
__u32 ie : 1; /* 32 indirect storage error */
__u32 to_be_defined_5 : 31; /* 33-63 */
} mci_t;
//
......
#ifndef _ASMS390X_SCATTERLIST_H
#define _ASMS390X_SCATTERLIST_H
#ifndef _ASMS390_SCATTERLIST_H
#define _ASMS390_SCATTERLIST_H
struct scatterlist {
struct page *page;
......
......@@ -2,7 +2,7 @@
* include/asm-s390/semaphore.h
*
* S390 version
* Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
*
* Derived from "include/asm-i386/semaphore.h"
* (C) Copyright 1996 Linus Torvalds
......@@ -17,16 +17,17 @@
#include <linux/rwsem.h>
struct semaphore {
/*
* Note that any negative value of count is equivalent to 0,
* but additionally indicates that some process(es) might be
* sleeping on `wait'.
*/
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEM_DEBUG_INIT(name)
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
__SEM_DEBUG_INIT(name) }
{ ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
#define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name,1)
......@@ -39,7 +40,7 @@ struct semaphore {
static inline void sema_init (struct semaphore *sem, int val)
{
*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
*sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val);
}
static inline void init_MUTEX (struct semaphore *sem)
......@@ -52,11 +53,6 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem)
sema_init(sem, 0);
}
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
......@@ -79,11 +75,28 @@ static inline int down_interruptible(struct semaphore * sem)
static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
ret = __down_trylock(sem);
return ret;
int old_val, new_val;
/*
* This inline assembly atomically implements the equivalent
* to the following C code:
* old_val = sem->count.counter;
* if ((new_val = old_val) > 0)
* sem->count.counter = --new_val;
* In the ppc code this is called atomic_dec_if_positive.
*/
__asm__ __volatile__ (
" l %0,0(%3)\n"
"0: ltr %1,%0\n"
" jle 1f\n"
" ahi %1,-1\n"
" cs %0,%1,0(%3)\n"
" jl 0b\n"
"1:"
: "=&d" (old_val), "=&d" (new_val),
"+m" (sem->count.counter)
: "a" (&sem->count.counter) : "cc" );
return old_val <= 0;
}
static inline void up(struct semaphore * sem)
......
......@@ -13,7 +13,7 @@
#define RAMDISK_ORIGIN 0x800000
#define RAMDISK_SIZE 0x800000
#ifndef __ASSEMBLER__
#ifndef __ASSEMBLY__
#define IPL_DEVICE (*(unsigned long *) (0x10404))
#define INITRD_START (*(unsigned long *) (0x1040C))
......
......@@ -13,81 +13,74 @@
#include <asm-generic/siginfo.h>
/*
* si_code values
* Digital reserves positive values for kernel-generated signals.
*/
#undef SI_TIMER
#define SI_TIMER -2 /* sent by timer expiration */
/*
* SIGILL si_codes
*/
#define ILL_ILLOPC 1 /* illegal opcode */
#define ILL_ILLOPN 2 /* illegal operand */
#define ILL_ILLADR 3 /* illegal addressing mode */
#define ILL_ILLTRP 4 /* illegal trap */
#define ILL_PRVOPC 5 /* privileged opcode */
#define ILL_PRVREG 6 /* privileged register */
#define ILL_COPROC 7 /* coprocessor error */
#define ILL_BADSTK 8 /* internal stack error */
#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
#define NSIGILL 8
/*
* SIGFPE si_codes
*/
#define FPE_INTDIV 1 /* integer divide by zero */
#define FPE_INTOVF 2 /* integer overflow */
#define FPE_FLTDIV 3 /* floating point divide by zero */
#define FPE_FLTOVF 4 /* floating point overflow */
#define FPE_FLTUND 5 /* floating point underflow */
#define FPE_FLTRES 6 /* floating point inexact result */
#define FPE_FLTINV 7 /* floating point invalid operation */
#define FPE_FLTSUB 8 /* subscript out of range */
#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
#define NSIGFPE 8
/*
* SIGSEGV si_codes
*/
#define SEGV_MAPERR 1 /* address not mapped to object */
#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
#define NSIGSEGV 2
/*
* SIGBUS si_codes
*/
#define BUS_ADRALN 1 /* invalid address alignment */
#define BUS_ADRERR 2 /* non-existant physical address */
#define BUS_OBJERR 3 /* object specific hardware error */
#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
#define BUS_ADRERR (__SI_FAULT|2) /* non-existant physical address */
#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
#define NSIGBUS 3
/*
* SIGTRAP si_codes
*/
#define TRAP_BRKPT 1 /* process breakpoint */
#define TRAP_TRACE 2 /* process trace trap */
#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
#define NSIGTRAP 2
/*
* SIGCHLD si_codes
*/
#define CLD_EXITED 1 /* child has exited */
#define CLD_KILLED 2 /* child was killed */
#define CLD_DUMPED 3 /* child terminated abnormally */
#define CLD_TRAPPED 4 /* traced child has trapped */
#define CLD_STOPPED 5 /* child has stopped */
#define CLD_CONTINUED 6 /* stopped child has continued */
#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
#define NSIGCHLD 6
/*
* SIGPOLL si_codes
*/
#define POLL_IN 1 /* data input available */
#define POLL_OUT 2 /* output buffers available */
#define POLL_MSG 3 /* input message available */
#define POLL_ERR 4 /* i/o error */
#define POLL_PRI 5 /* high priority input available */
#define POLL_HUP 6 /* device disconnected */
#define POLL_IN (__SI_POLL|1) /* data input available */
#define POLL_OUT (__SI_POLL|2) /* output buffers available */
#define POLL_MSG (__SI_POLL|3) /* input message available */
#define POLL_ERR (__SI_POLL|4) /* i/o error */
#define POLL_PRI (__SI_POLL|5) /* high priority input available */
#define POLL_HUP (__SI_POLL|6) /* device disconnected */
#define NSIGPOLL 6
#endif
......@@ -13,6 +13,7 @@
/* Avoid too many header ordering problems. */
struct siginfo;
struct pt_regs;
#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
......
......@@ -59,9 +59,6 @@ typedef enum
typedef enum
{
ec_schedule=0,
ec_restart,
ec_halt,
ec_power_off,
ec_call_function,
ec_bit_last
} ec_bit_sig;
......@@ -129,6 +126,6 @@ signal_processor_ps(__u32 *statusptr, __u32 parameter,
return ccode;
}
#endif __SIGP__
#endif /* __SIGP__ */
......@@ -10,6 +10,8 @@
#define __ASM_SMP_H
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/ptrace.h>
#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
......@@ -26,7 +28,7 @@ typedef struct
__u16 cpu;
} sigp_info;
extern unsigned long cpu_online_map;
extern volatile unsigned long cpu_online_map;
#define NO_PROC_ID 0xFF /* No processor magic marker */
......@@ -42,7 +44,7 @@ extern unsigned long cpu_online_map;
#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
#define smp_processor_id() (current->processor)
#define smp_processor_id() (current_thread_info()->cpu)
extern __inline__ int cpu_logical_map(int cpu)
{
......@@ -64,7 +66,5 @@ extern __inline__ __u16 hard_smp_processor_id(void)
#define cpu_logical_map(cpu) (cpu)
void smp_local_timer_interrupt(struct pt_regs * regs);
#endif
#endif
......@@ -27,32 +27,34 @@ typedef struct {
#define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
#define spin_is_locked(x) ((x)->lock != 0)
extern inline void spin_lock(spinlock_t *lp)
extern inline void _raw_spin_lock(spinlock_t *lp)
{
__asm__ __volatile(" bras 1,1f\n"
unsigned int reg1, reg2;
__asm__ __volatile(" bras %0,1f\n"
"0: diag 0,0,68\n"
"1: slr 0,0\n"
" cs 0,1,0(%0)\n"
"1: slr %1,%1\n"
" cs %1,%0,0(%3)\n"
" jl 0b\n"
: : "a" (&lp->lock) : "0", "1", "cc", "memory" );
: "=&d" (reg1), "=&d" (reg2), "+m" (lp->lock)
: "a" (&lp->lock) : "cc" );
}
extern inline int spin_trylock(spinlock_t *lp)
extern inline int _raw_spin_trylock(spinlock_t *lp)
{
unsigned long result;
unsigned long result, reg;
__asm__ __volatile(" slr %0,%0\n"
" basr 1,0\n"
"0: cs %0,1,0(%1)"
: "=&d" (result)
: "a" (&lp->lock) : "1", "cc", "memory" );
" basr %1,0\n"
"0: cs %0,%1,0(%3)"
: "=&d" (result), "=&d" (reg), "+m" (lp->lock)
: "a" (&lp->lock) : "cc" );
return !result;
}
extern inline void spin_unlock(spinlock_t *lp)
extern inline void _raw_spin_unlock(spinlock_t *lp)
{
__asm__ __volatile(" xc 0(4,%0),0(%0)\n"
__asm__ __volatile(" xc 0(4,%1),0(%1)\n"
" bcr 15,0"
: : "a" (&lp->lock) : "memory", "cc" );
: "+m" (lp->lock) : "a" (&lp->lock) : "cc" );
}
/*
......@@ -74,44 +76,48 @@ typedef struct {
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define read_lock(rw) \
asm volatile(" l 2,0(%0)\n" \
#define _raw_read_lock(rw) \
asm volatile(" l 2,0(%1)\n" \
" j 1f\n" \
"0: diag 0,0,68\n" \
"1: la 2,0(2)\n" /* clear high (=write) bit */ \
" la 3,1(2)\n" /* one more reader */ \
" cs 2,3,0(%0)\n" /* try to write new value */ \
" cs 2,3,0(%1)\n" /* try to write new value */ \
" jl 0b" \
: : "a" (&(rw)->lock) : "2", "3", "cc", "memory" );
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \
: "2", "3", "cc" )
#define read_unlock(rw) \
asm volatile(" l 2,0(%0)\n" \
#define _raw_read_unlock(rw) \
asm volatile(" l 2,0(%1)\n" \
" j 1f\n" \
"0: diag 0,0,68\n" \
"1: lr 3,2\n" \
" ahi 3,-1\n" /* one less reader */ \
" cs 2,3,0(%0)\n" \
" cs 2,3,0(%1)\n" \
" jl 0b" \
: : "a" (&(rw)->lock) : "2", "3", "cc", "memory" );
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \
: "2", "3", "cc" )
#define write_lock(rw) \
#define _raw_write_lock(rw) \
asm volatile(" lhi 3,1\n" \
" sll 3,31\n" /* new lock value = 0x80000000 */ \
" j 1f\n" \
"0: diag 0,0,68\n" \
"1: slr 2,2\n" /* old lock value must be 0 */ \
" cs 2,3,0(%0)\n" \
" cs 2,3,0(%1)\n" \
" jl 0b" \
: : "a" (&(rw)->lock) : "2", "3", "cc", "memory" );
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \
: "2", "3", "cc" )
#define write_unlock(rw) \
#define _raw_write_unlock(rw) \
asm volatile(" slr 3,3\n" /* new lock value = 0 */ \
" j 1f\n" \
"0: diag 0,0,68\n" \
"1: lhi 2,1\n" \
" sll 2,31\n" /* old lock value must be 0x80000000 */ \
" cs 2,3,0(%0)\n" \
" cs 2,3,0(%1)\n" \
" jl 0b" \
: : "a" (&(rw)->lock) : "2", "3", "cc", "memory" );
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \
: "2", "3", "cc" )
#endif /* __ASM_SPINLOCK_H */
#ifndef __ASM_S390_SUSPEND_H
#define __ASM_S390_SUSPEND_H
#endif
......@@ -12,18 +12,19 @@
#define __ASM_SYSTEM_H
#include <linux/config.h>
#include <asm/types.h>
#ifdef __KERNEL__
#include <asm/lowcore.h>
#endif
#include <linux/kernel.h>
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
#define switch_to(prev,next) do { \
if (prev == next) \
break; \
save_fp_regs1(&prev->thread.fp_regs); \
restore_fp_regs1(&next->thread.fp_regs); \
last = resume(&prev->thread,&next->thread); \
resume(prev,next); \
} while (0)
struct task_struct;
......@@ -97,8 +98,6 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
: "+d&" (x) : "a" (ptr)
: "memory", "cc", "0" );
break;
default:
abort();
}
return x;
}
......@@ -130,26 +129,26 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
/* interrupt control.. */
#define __sti() ({ \
__u8 dummy; \
__u8 __dummy; \
__asm__ __volatile__ ( \
"stosm %0,0x03" : "=m" (dummy) : : "memory"); \
"stosm 0(%0),0x03" : : "a" (&__dummy) : "memory"); \
})
#define __cli() ({ \
__u32 flags; \
__u32 __flags; \
__asm__ __volatile__ ( \
"stnsm %0,0xFC" : "=m" (flags) : : "memory"); \
flags; \
"stnsm 0(%0),0xFC" : : "a" (&__flags) : "memory"); \
__flags; \
})
#define __save_flags(x) \
__asm__ __volatile__("stosm %0,0" : "=m" (x) : : "memory")
__asm__ __volatile__("stosm 0(%0),0" : : "a" (&x) : "memory")
#define __restore_flags(x) \
__asm__ __volatile__("ssm %0" : : "m" (x) : "memory")
__asm__ __volatile__("ssm 0(%0)" : : "a" (&x) : "memory")
#define __load_psw(psw) \
__asm__ __volatile__("lpsw %0" : : "m" (psw) : "cc" );
__asm__ __volatile__("lpsw 0(%0)" : : "a" (&psw) : "cc" );
#define __ctl_load(array, low, high) ({ \
__asm__ __volatile__ ( \
......@@ -170,7 +169,7 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
})
#define __ctl_set_bit(cr, bit) ({ \
__u8 dummy[16]; \
__u8 __dummy[16]; \
__asm__ __volatile__ ( \
" la 1,%0\n" /* align to 8 byte */ \
" ahi 1,7\n" \
......@@ -184,12 +183,12 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
" or 0,%2\n" /* set the bit */ \
" st 0,0(1)\n" \
"1: ex %1,4(2)" /* execute lctl */ \
: "=m" (dummy) : "a" (cr*17), "a" (1<<(bit)) \
: "=m" (__dummy) : "a" (cr*17), "a" (1<<(bit)) \
: "cc", "0", "1", "2"); \
})
#define __ctl_clear_bit(cr, bit) ({ \
__u8 dummy[16]; \
__u8 __dummy[16]; \
__asm__ __volatile__ ( \
" la 1,%0\n" /* align to 8 byte */ \
" ahi 1,7\n" \
......@@ -203,7 +202,7 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
" nr 0,%2\n" /* set the bit */ \
" st 0,0(1)\n" \
"1: ex %1,4(2)" /* execute lctl */ \
: "=m" (dummy) : "a" (cr*17), "a" (~(1<<(bit))) \
: "=m" (__dummy) : "a" (cr*17), "a" (~(1<<(bit))) \
: "cc", "0", "1", "2"); \
})
......@@ -244,12 +243,17 @@ extern void smp_ctl_clear_bit(int cr, int bit);
#endif
#ifdef __KERNEL__
extern struct task_struct *resume(void *,void *);
extern struct task_struct *resume(void *, void *);
extern int save_fp_regs1(s390_fp_regs *fpregs);
extern void save_fp_regs(s390_fp_regs *fpregs);
extern int restore_fp_regs1(s390_fp_regs *fpregs);
extern void restore_fp_regs(s390_fp_regs *fpregs);
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
#endif
#endif
......
/*************************************************************************
*
* tape390.h
* enables user programs to display messages on the tape device
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Despina Papadopoulou <despina_p@de.ibm.com>
*
*************************************************************************/
#ifndef _TAPE390_H
#define _TAPE390_H
#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct)
/*
* The TAPE390_DISPLAY ioctl calls the Load Display command
* which transfers 17 bytes of data from the channel to the subsystem:
* - 1 format control byte, and
* - two 8-byte messages
*
* Format control byte:
* 0-2: New Message Overlay
* 3: Alternate Messages
* 4: Blink Message
* 5: Display Low/High Message
* 6: Reserved
* 7: Automatic Load Request
*
*/
typedef struct display_struct {
	char cntrl;		/* format control byte; bit layout described above */
	char message1[8];	/* first 8-byte message for the tape display */
	char message2[8];	/* second 8-byte message for the tape display */
} display_struct;
#endif
/*
* include/asm-s390/thread_info.h
*
* S390 version
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/processor.h>
/*
 * Low level task data that entry.S needs immediate access to.
 * - this struct should fit entirely inside of one cache line
 * - this struct shares the supervisor stack pages
 * - entry.S addresses these fields by fixed offset: if the layout of
 *   this structure is changed, the assembly constants must be changed
 *   to match
 */
struct thread_info {
	struct task_struct *task;	 /* main task structure */
	struct exec_domain *exec_domain; /* execution domain */
	unsigned long flags;		 /* low level flags */
	unsigned int cpu;		 /* current CPU */
	int preempt_count;		 /* 0 => preemptable, <0 => BUG */
};
#define PREEMPT_ACTIVE 0x4000000
/*
* macros/functions for gaining access to the thread information structure
*/
#define INIT_THREAD_INFO(tsk) \
{ \
task: &tsk, \
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
	/*
	 * Load the current kernel stack pointer from absolute address
	 * 0xc40 -- presumably the kernel-stack slot in the s390 lowcore
	 * (NOTE(review): confirm against asm/lowcore.h) -- and step back
	 * 8192 bytes to the stack base, where thread_info lives.  The
	 * 8192 matches THREAD_SIZE (2*PAGE_SIZE) defined below, assuming
	 * 4K pages.
	 */
	return (struct thread_info *)((*(unsigned long *) 0xc40)-8192);
}
/* thread information allocation */
#define alloc_thread_info() ((struct thread_info *) \
__get_free_pages(GFP_KERNEL,1))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 1)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#endif
/*
* Size of kernel stack for each process
*/
#define THREAD_SIZE (2*PAGE_SIZE)
/*
* thread information flags bit numbers
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
......@@ -17,13 +17,24 @@
(1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
<< (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
typedef unsigned long long cycles_t;
extern cycles_t cacheflush_time;
static inline cycles_t get_cycles(void)
{
return 0;
cycles_t cycles;
__asm__("stck 0(%0)" : : "a" (&(cycles)) : "memory", "cc");
return cycles >> 2;
}
static inline unsigned long long get_clock (void)
{
unsigned long long clk;
__asm__("stck 0(%0)" : : "a" (&(clk)) : "memory", "cc");
return clk;
}
#endif
#ifndef _S390_TLB_H
#define _S390_TLB_H
/*
* s390 doesn't need any special per-pte or
* per-vma handling..
*/
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
/*
* .. because we flush the whole mm when it
* fills up.
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#endif
#ifndef _S390_TLBFLUSH_H
#define _S390_TLBFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
/*
* S/390 has three ways of flushing TLBs
* 'ptlb' does a flush of the local processor
* 'csp' flushes the TLBs on all PUs of a SMP
* 'ipte' invalidates a pte in a page table and flushes that out of
* the TLBs of all PUs of a SMP
*/
#define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
#ifndef CONFIG_SMP
/*
 * We always need to flush, since s390 does not flush the TLB
 * on each context switch (see comment above).
 */
static inline void flush_tlb(void)
{
	local_flush_tlb();	/* !SMP: a local ptlb is always sufficient */
}
/* Flush all TLBs; on a uniprocessor this is just the local flush. */
static inline void flush_tlb_all(void)
{
	local_flush_tlb();
}
/* Flush the TLB for one mm context; !SMP ptlb cannot flush per-mm,
 * so flush everything on the local CPU. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	local_flush_tlb();
}
/* Flush a single page; implemented as a full local flush since the
 * !SMP path does not use ipte for a per-pte invalidate. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	local_flush_tlb();
}
/* Flush a range of user pages; falls back to a full local flush. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	local_flush_tlb();
}
/*
 * Flush a range of kernel pages; falls back to a full local flush.
 * No trailing semicolon in the expansion -- the caller supplies it.
 * The previous definition expanded to two statements, which breaks
 * unbraced if/else bodies; the SMP variant below already omits it.
 */
#define flush_tlb_kernel_range(start, end) \
	local_flush_tlb()
#else
#include <asm/smp.h>
extern void smp_ptlb_all(void);
/*
 * Flush the TLBs of all CPUs.  Uses the csp instruction when the
 * machine supports it (per the comment above, csp flushes the TLBs
 * on all PUs); otherwise signals every CPU to run a local ptlb.
 */
static inline void global_flush_tlb(void)
{
	if (MACHINE_HAS_CSP) {
		long dummy = 0;
		__asm__ __volatile__ (
			" la 4,1(%0)\n"	/* r4 = &dummy + 1; low bit set --
					   presumably selects the purge-TLB
					   function, TODO confirm in PoP */
			" slr 2,2\n"	/* r2 = 0 */
			" slr 3,3\n"	/* r3 = 0 */
			" csp 2,4"	/* compare-and-swap-and-purge */
			: : "a" (&dummy) : "cc", "2", "3", "4" );
	} else
		smp_ptlb_all();
}
/*
* We only have to do global flush of tlb if process run since last
* flush on any other pu than current.
* If we have threads (mm->count > 1) we always do a global flush,
* since the process runs on more than one processor at the same time.
*/
/*
 * Flush the TLB entries of one mm context.  Per the comment above: a
 * global flush is only required if the mm may have run on another CPU
 * since the last flush -- i.e. it is shared (mm_count != 1) or its
 * cpu_vm_mask records a CPU other than the current one.  Otherwise a
 * local ptlb suffices.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
	if ((smp_num_cpus > 1) &&
	    ((atomic_read(&mm->mm_count) != 1) ||
	     (mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
		/* Restrict the mask to this CPU before the global flush. */
		mm->cpu_vm_mask = (1UL << smp_processor_id());
		global_flush_tlb();
	} else {
		local_flush_tlb();
	}
}
/* Flush the current mm's TLB entries (SMP variant). */
static inline void flush_tlb(void)
{
	__flush_tlb_mm(current->mm);
}
/* Flush the TLBs of all processes on all CPUs. */
static inline void flush_tlb_all(void)
{
	global_flush_tlb();
}
/* Flush one mm context's TLB entries, globally only when needed. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
}
/* Flush a single page; delegates to the per-mm flush (no ipte here). */
static inline void flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long addr)
{
	__flush_tlb_mm(vma->vm_mm);
}
/* Flush a range of user pages; delegates to the per-mm flush. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
				   unsigned long start, unsigned long end)
{
	__flush_tlb_mm(vma->vm_mm);
}
#define flush_tlb_kernel_range(start, end) \
__flush_tlb_mm(&init_mm)
#endif
/* Flush cached page-table entries for a range: intentionally a no-op,
 * since S/390 does not keep any page table caches in the TLB. */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
	/* S/390 does not keep any page table caches in TLB */
}
#endif /* _S390_TLBFLUSH_H */
......@@ -15,6 +15,7 @@
* User space memory access functions
*/
#include <linux/sched.h>
#include <linux/errno.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
......@@ -28,21 +29,22 @@
* For historical reasons, these macros are grossly misnamed.
*/
#define MAKE_MM_SEG(s,a) ((mm_segment_t) { (s),(a) })
#define MAKE_MM_SEG(a) ((mm_segment_t) { (a) })
#define KERNEL_DS MAKE_MM_SEG(0x7FFFFFFF,0)
#define USER_DS MAKE_MM_SEG(PAGE_OFFSET,1)
#define KERNEL_DS MAKE_MM_SEG(0)
#define USER_DS MAKE_MM_SEG(1)
#define get_ds() (KERNEL_DS)
#define get_fs() (current->thread.fs)
#define set_fs(x) ({asm volatile("sar 4,%0"::"a" (x.acc4)); \
current->thread.fs = (x);})
#define get_fs() ({ mm_segment_t __x; \
asm volatile("ear %0,4":"=a" (__x)); \
__x;})
#define set_fs(x) ({asm volatile("sar 4,%0"::"a" ((x).ar4));})
#define segment_eq(a,b) ((a).acc4 == (b).acc4)
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
#define __access_ok(addr,size) ((((long) addr + size)&0x7FFFFFFFL) < current->addr_limit.seg)
#define __access_ok(addr,size) (1)
#define access_ok(type,addr,size) __access_ok(addr,size)
......@@ -78,20 +80,20 @@ extern unsigned long search_exception_table(unsigned long);
* use the right size if we just have the right pointer type.
*/
extern inline int __put_user_asm_8(__u64 x, void *ptr)
extern inline int __put_user_asm_8(void *x, void *ptr)
{
int err;
__asm__ __volatile__ ( " sr %1,%1\n"
" la 2,%2\n"
" la 4,%0\n"
__asm__ __volatile__ ( " sr %0,01\n"
" lr 2,%1\n"
" lr 4,%2\n"
" sacf 512\n"
"0: mvc 0(8,4),0(2)\n"
" sacf 0\n"
"1:\n"
".section .fixup,\"ax\"\n"
"2: sacf 0\n"
" lhi %1,%h3\n"
" lhi %0,%h3\n"
" bras 4,3f\n"
" .long 1b\n"
"3: l 4,0(4)\n"
......@@ -101,8 +103,8 @@ extern inline int __put_user_asm_8(__u64 x, void *ptr)
" .align 4\n"
" .long 0b,2b\n"
".previous"
: "=m" (*((__u32*) ptr)), "=&d" (err)
: "m" (x), "K" (-EFAULT)
: "=&d" (err)
: "d" (x), "d" (ptr), "K" (-EFAULT)
: "cc", "2", "4" );
return err;
}
......@@ -111,15 +113,15 @@ extern inline int __put_user_asm_4(__u32 x, void *ptr)
{
int err;
__asm__ __volatile__ ( " sr %1,%1\n"
" la 4,%0\n"
__asm__ __volatile__ ( " sr %0,%0\n"
" lr 4,%2\n"
" sacf 512\n"
"0: st %2,0(4)\n"
"0: st %1,0(4)\n"
" sacf 0\n"
"1:\n"
".section .fixup,\"ax\"\n"
"2: sacf 0\n"
" lhi %1,%h3\n"
" lhi %0,%h3\n"
" bras 4,3f\n"
" .long 1b\n"
"3: l 4,0(4)\n"
......@@ -129,8 +131,8 @@ extern inline int __put_user_asm_4(__u32 x, void *ptr)
" .align 4\n"
" .long 0b,2b\n"
".previous"
: "=m" (*((__u32*) ptr)) , "=&d" (err)
: "d" (x), "K" (-EFAULT)
: "=&d" (err)
: "d" (x), "d" (ptr), "K" (-EFAULT)
: "cc", "4" );
return err;
}
......@@ -139,15 +141,15 @@ extern inline int __put_user_asm_2(__u16 x, void *ptr)
{
int err;
__asm__ __volatile__ ( " sr %1,%1\n"
" la 4,%0\n"
__asm__ __volatile__ ( " sr %0,%0\n"
" lr 4,%2\n"
" sacf 512\n"
"0: sth %2,0(4)\n"
"0: sth %1,0(4)\n"
" sacf 0\n"
"1:\n"
".section .fixup,\"ax\"\n"
"2: sacf 0\n"
" lhi %1,%h3\n"
" lhi %0,%h3\n"
" bras 4,3f\n"
" .long 1b\n"
"3: l 4,0(4)\n"
......@@ -157,8 +159,8 @@ extern inline int __put_user_asm_2(__u16 x, void *ptr)
" .align 4\n"
" .long 0b,2b\n"
".previous"
: "=m" (*((__u16*) ptr)) , "=&d" (err)
: "d" (x), "K" (-EFAULT)
: "=&d" (err)
: "d" (x), "d" (ptr), "K" (-EFAULT)
: "cc", "4" );
return err;
}
......@@ -167,15 +169,15 @@ extern inline int __put_user_asm_1(__u8 x, void *ptr)
{
int err;
__asm__ __volatile__ ( " sr %1,%1\n"
" la 4,%0\n"
__asm__ __volatile__ ( " sr %0,%0\n"
" lr 4,%2\n"
" sacf 512\n"
"0: stc %2,0(4)\n"
"0: stc %1,0(4)\n"
" sacf 0\n"
"1:\n"
".section .fixup,\"ax\"\n"
"2: sacf 0\n"
" lhi %1,%h3\n"
" lhi %0,%h3\n"
" bras 4,3f\n"
" .long 1b\n"
"3: l 4,0(4)\n"
......@@ -185,8 +187,8 @@ extern inline int __put_user_asm_1(__u8 x, void *ptr)
" .align 4\n"
" .long 0b,2b\n"
".previous"
: "=m" (*((__u8*) ptr)) , "=&d" (err)
: "d" (x), "K" (-EFAULT)
: "=&d" (err)
: "d" (x), "d" (ptr), "K" (-EFAULT)
: "cc", "4" );
return err;
}
......@@ -197,19 +199,24 @@ extern inline int __put_user_asm_1(__u8 x, void *ptr)
*/
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
__typeof__(*(ptr)) __x = (x); \
int __pu_err; \
switch (sizeof (*(ptr))) { \
case 1: \
__pu_err = __put_user_asm_1((__u8)(__u32)x,(ptr));\
__pu_err = __put_user_asm_1((__u8)(__u32) __x, \
__pu_addr); \
break; \
case 2: \
__pu_err = __put_user_asm_2((__u16)(__u32)x,(ptr));\
__pu_err = __put_user_asm_2((__u16)(__u32) __x, \
__pu_addr); \
break; \
case 4: \
__pu_err = __put_user_asm_4((__u32) x,(ptr));\
__pu_err = __put_user_asm_4((__u32) __x, \
__pu_addr); \
break; \
case 8: \
__pu_err = __put_user_asm_8((__u64) x,(ptr));\
__pu_err = __put_user_asm_8(&__x, __pu_addr); \
break; \
default: \
__pu_err = __put_user_bad(); \
......@@ -218,17 +225,7 @@ extern inline int __put_user_asm_1(__u8 x, void *ptr)
__pu_err; \
})
#define put_user(x, ptr) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
__typeof__(*(ptr)) __x = (x); \
if (__access_ok((long)__pu_addr,sizeof(*(ptr)))) { \
__pu_err = 0; \
__put_user((__x), (__pu_addr)); \
} \
__pu_err; \
})
#define put_user(x, ptr) __put_user(x, ptr)
extern int __put_user_bad(void);
......@@ -336,77 +333,44 @@ extern int __put_user_bad(void);
#define __get_user(x, ptr) \
({ \
__typeof__(ptr) __gu_addr = (ptr); \
__typeof__(*(ptr)) __x; \
int __gu_err; \
switch (sizeof(*(ptr))) { \
switch (sizeof(*(__gu_addr))) { \
case 1: \
__get_user_asm_1(x,ptr,__gu_err); \
__get_user_asm_1(__x, __gu_addr, __gu_err); \
break; \
case 2: \
__get_user_asm_2(x,ptr,__gu_err); \
__get_user_asm_2(__x, __gu_addr, __gu_err); \
break; \
case 4: \
__get_user_asm_4(x,ptr,__gu_err); \
__get_user_asm_4(__x, __gu_addr, __gu_err); \
break; \
case 8: \
__get_user_asm_8(x,ptr,__gu_err); \
__get_user_asm_8(__x, __gu_addr, __gu_err); \
break; \
default: \
(x) = 0; \
__x = 0; \
__gu_err = __get_user_bad(); \
break; \
} \
__gu_err; \
})
#define get_user(x, ptr) \
({ \
long __gu_err = -EFAULT; \
__typeof__(ptr) __gu_addr = (ptr); \
__typeof__(*(ptr)) __x; \
if (__access_ok((long)__gu_addr,sizeof(*(ptr)))) { \
__gu_err = 0; \
__get_user((__x), (__gu_addr)); \
(x) = __x; \
} \
else \
(x) = 0; \
__gu_err; \
})
#define get_user(x, ptr) __get_user(x, ptr)
extern int __get_user_bad(void);
/*
* access register are set up, that 4 points to secondary (user) , 2 to primary (kernel)
*/
asmlinkage void __copy_from_user_fixup(void /* special calling convention */);
asmlinkage void __copy_to_user_fixup(void /* special calling convention */);
extern inline unsigned long
__copy_to_user_asm(void* to, const void* from, long n)
{
__asm__ __volatile__ ( " lr 2,%2\n"
" lr 4,%1\n"
" lr 3,%0\n"
" lr 5,3\n"
" sacf 512\n"
"0: mvcle 4,2,0\n"
" jo 0b\n"
" sacf 0\n"
" lr %0,3\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,__copy_to_user_fixup\n"
".previous"
: "+&d" (n) : "d" (to), "d" (from)
: "cc", "2", "3", "4", "5" );
return n;
}
extern long __copy_to_user_asm(const void *from, long n, const void *to);
#define __copy_to_user(to, from, n) \
({ \
__copy_to_user_asm(to,from,n); \
__copy_to_user_asm(from, n, to); \
})
#define copy_to_user(to, from, n) \
......@@ -414,38 +378,18 @@ __copy_to_user_asm(void* to, const void* from, long n)
long err = 0; \
__typeof__(n) __n = (n); \
if (__access_ok(to,__n)) { \
err = __copy_to_user_asm(to,from,__n); \
err = __copy_to_user_asm(from, __n, to); \
} \
else \
err = __n; \
err; \
})
extern inline unsigned long
__copy_from_user_asm(void* to, const void* from, long n)
{
__asm__ __volatile__ ( " lr 2,%1\n"
" lr 4,%2\n"
" lr 3,%0\n"
" lr 5,3\n"
" sacf 512\n"
"0: mvcle 2,4,0\n"
" jo 0b\n"
" sacf 0\n"
" lr %0,5\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,__copy_from_user_fixup\n"
".previous"
: "+&d" (n) : "d" (to), "d" (from)
: "cc", "2", "3", "4", "5" );
return n;
}
extern long __copy_from_user_asm(void *to, long n, const void *from);
#define __copy_from_user(to, from, n) \
({ \
__copy_from_user_asm(to,from,n); \
__copy_from_user_asm(to, n, from); \
})
#define copy_from_user(to, from, n) \
......@@ -453,7 +397,7 @@ __copy_from_user_asm(void* to, const void* from, long n)
long err = 0; \
__typeof__(n) __n = (n); \
if (__access_ok(from,__n)) { \
err = __copy_from_user_asm(to,from,__n); \
err = __copy_from_user_asm(to, __n, from); \
} \
else \
err = __n; \
......@@ -550,38 +494,12 @@ strnlen_user(const char * src, unsigned long n)
* Zero Userspace
*/
/*
 * Zero n bytes of user memory at 'to'.
 * Returns the number of bytes that could not be cleared.
 * NOTE(review): on the fault path the returned value is r3, which
 * this code zeroes and never updates afterwards, so the remaining
 * count looks like it is always reported as 0 -- verify the
 * accounting against the mainline version.
 *
 * Implemented as MVCLE with an empty source operand (r2/r3 = 0),
 * which pads the destination with the pad byte 0.  "sacf 512"
 * switches the address-space control to reach user memory.
 */
static inline unsigned long
__clear_user(void *to, unsigned long n)
{
__asm__ __volatile__ ( " sacf 512\n" /* address user space */
" lr 4,%1\n" /* r4 = destination address */
" lr 5,%0\n" /* r5 = destination length = n */
" sr 2,2\n" /* r2 = source address = 0 */
" sr 3,3\n" /* r3 = source length = 0 (pad 0) */
"0: mvcle 4,2,0\n" /* pad destination with zeroes */
" jo 0b\n" /* cc 3: interrupted, resume */
" sacf 0\n"
"1: lr %0,3\n"
".section .fixup,\"ax\"\n"
"2: lhi 5,-4096\n" /* fault: clamp length to the */
" n 5,0x90\n" /* faulting page boundary (0x90: */
" sr 5,4\n" /* presumably lowcore fault addr) */
" mvcle 4,2,0\n" /* clear what is still accessible */
" sacf 0\n"
" basr 4,0\n" /* return to label 1 through a */
" l 4,3f-.(4)\n" /* literal -- .fixup code may be */
" br 4\n" /* out of branch range */
"3: .long 1b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 4\n"
" .long 0b,2b\n" /* fault at 0b -> fixup at 2b */
".previous"
: "+&a" (n)
: "a" (to)
: "cc", "2", "3", "4", "5" );
return n;
}
extern long __clear_user_asm(void *to, long n);
#define __clear_user(to, n) \
({ \
__clear_user_asm(to, n); \
})
static inline unsigned long
clear_user(void *to, unsigned long n)
......
......@@ -211,8 +211,26 @@
#define __NR_mincore 218
#define __NR_madvise 219
#define __NR_getdents64 220
#define __NR_gettid 226
#define __NR_tkill 227
#define __NR_fcntl64 221
#define __NR_readahead 222
#define __NR_sendfile64 223
#define __NR_setxattr 224
#define __NR_lsetxattr 225
#define __NR_fsetxattr 226
#define __NR_getxattr 227
#define __NR_lgetxattr 228
#define __NR_fgetxattr 229
#define __NR_listxattr 230
#define __NR_llistxattr 231
#define __NR_flistxattr 232
#define __NR_removexattr 233
#define __NR_lremovexattr 234
#define __NR_fremovexattr 235
#define __NR_gettid 236
#define __NR_tkill 237
#define __NR_futex 238
#define __NR_sched_setaffinity 239
#define __NR_sched_getaffinity 240
/* user-visible error numbers are in the range -1 - -122: see <asm-s390/errno.h> */
......@@ -335,6 +353,8 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
#ifdef __KERNEL_SYSCALLS__
#include <asm/stat.h>
/*
* we need this inline - forking from kernel space will result
* in NO COPY ON WRITE (!!!), until an execve is executed. This
......@@ -363,7 +383,9 @@ static inline _syscall1(int,_exit,int,exitcode)
static inline _syscall1(int,delete_module,const char *,name)
static inline _syscall2(long,stat,char *,filename,struct stat *,statbuf)
static inline pid_t waitpid(int pid, int * wait_stat, int flags)
/*
 * Kernel-internal waitpid: a thin wrapper that calls sys_wait4
 * directly with a NULL rusage pointer (no resource-usage reporting).
 * Forward-declare struct rusage so the prototype below is valid
 * without pulling in the full header.
 */
struct rusage;
extern long sys_wait4(pid_t, unsigned int *, int, struct rusage *);
static inline pid_t waitpid(int pid, int *wait_stat, int flags)
{
return sys_wait4(pid, wait_stat, flags, NULL);
}
......
......@@ -18,7 +18,6 @@
#include <asm/dasd.h>
#endif
#define DASD_API_VERSION 0
#define LINE_LENGTH 80
#define VTOC_START_CC 0x0
......
......@@ -26,29 +26,17 @@ typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
#define atomic_eieio() __asm__ __volatile__ ("BCR 15,0")
#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string) \
__asm__ __volatile__(" l %0,0(%2)\n" \
__asm__ __volatile__(" l %0,0(%3)\n" \
"0: lr %1,%0\n" \
op_string " %1,%3\n" \
" cs %0,%1,0(%2)\n" \
op_string " %1,%4\n" \
" cs %0,%1,0(%3)\n" \
" jl 0b" \
: "=&d" (old_val), "=&d" (new_val) \
: "=&d" (old_val), "=&d" (new_val), \
"+m" (((atomic_t *)(ptr))->counter) \
: "a" (ptr), "d" (op_val) : "cc" );
/*
 * Read the counter.  The "bcr 15,0" executed before the load is a
 * serializing instruction (the same barrier as atomic_eieio above),
 * so the value is not fetched ahead of earlier accesses.
 */
static __inline__ int atomic_read(atomic_t *v)
{
int retval;
__asm__ __volatile__("bcr 15,0\n\t" /* serialize (memory barrier) */
"l %0,%1"
: "=d" (retval) : "m" (*v) );
return retval;
}
/*
 * Store i into the counter, then serialize ("bcr 15,0", the same
 * barrier as atomic_eieio above) so the store is visible before
 * subsequent accesses.
 */
static __inline__ void atomic_set(atomic_t *v, int i)
{
__asm__ __volatile__("st %1,%0\n\t"
"bcr 15,0" /* serialize (memory barrier) */
: "=m" (*v) : "d" (i) );
}
/*
 * Plain load/store suffice here: the counter field is a volatile int
 * (see the atomic_t typedef above), so the compiler performs a real
 * memory access on every use.  No barrier is implied, unlike the old
 * inline-asm versions these macros replace.
 */
#define atomic_read(v) ((v)->counter)
#define atomic_set(v,i) (((v)->counter) = (i))
static __inline__ void atomic_add(int i, atomic_t *v)
{
......@@ -138,14 +126,14 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
int retval;
__asm__ __volatile__(
" lr 0,%2\n"
" cs 0,%3,0(%1)\n"
" lr %0,%3\n"
" cs %0,%4,0(%2)\n"
" ipm %0\n"
" srl %0,28\n"
"0:"
: "=&d" (retval)
: "=&d" (retval), "+m" (v->counter)
: "a" (v), "d" (expected_oldval) , "d" (new_val)
: "0", "cc");
: "cc");
return retval;
}
......@@ -155,12 +143,14 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
static __inline__ void
atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
{
unsigned long tmp;
__asm__ __volatile__(
"0: lr 0,%1\n"
" cs 0,%2,0(%0)\n"
"0: lr %1,%3\n"
" cs %1,%4,0(%2)\n"
" jl 0b\n"
: : "a" (v), "d" (expected_oldval) , "d" (new_val)
: "cc", "0" );
: "+m" (v->counter), "=&d" (tmp)
: "a" (v), "d" (expected_oldval) , "d" (new_val)
: "cc" );
}
#define atomic_compare_and_swap_debug(where,from,to) \
......
......@@ -51,268 +51,218 @@
extern const char _oi_bitmap[];
extern const char _ni_bitmap[];
extern const char _zb_findmap[];
extern const char _sb_findmap[];
#ifdef CONFIG_SMP
/*
* SMP save set_bit routine based on compare and swap (CS)
*/
static __inline__ void set_bit_cs(unsigned long nr, volatile void * addr)
static inline void set_bit_cs(unsigned long nr, volatile void *ptr)
{
unsigned long bits, mask;
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lghi %2,7\n" /* CS must be aligned on 4 byte b. */
" ngr %2,%1\n" /* isolate last 2 bits of address */
" xgr %1,%2\n" /* make addr % 4 == 0 */
" sllg %2,%2,3\n"
" agr %0,%2\n" /* add alignement to bitnr */
addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
#endif
" lghi %2,63\n"
" nr %2,%0\n" /* make shift value */
" xr %0,%2\n"
" srlg %0,%0,3\n"
" lghi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" sllg %3,%3,0(%2)\n" /* make OR mask */
" lg %0,0(%1)\n"
"0: lgr %2,%0\n" /* CS loop starts here */
" ogr %2,%3\n" /* set bit */
" csg %0,%2,0(%1)\n"
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 63); /* make OR mask */
asm volatile(
" lg %0,0(%4)\n"
"0: lgr %1,%0\n"
" ogr %1,%3\n"
" csg %0,%1,0(%4)\n"
" jl 0b"
: "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
: "cc", "memory" );
: "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
: "d" (mask), "a" (addr)
: "cc" );
}
/*
* SMP save clear_bit routine based on compare and swap (CS)
*/
static __inline__ void clear_bit_cs(unsigned long nr, volatile void * addr)
static inline void clear_bit_cs(unsigned long nr, volatile void *ptr)
{
unsigned long bits, mask;
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lghi %2,7\n" /* CS must be aligned on 4 byte b. */
" ngr %2,%1\n" /* isolate last 2 bits of address */
" xgr %1,%2\n" /* make addr % 4 == 0 */
" sllg %2,%2,3\n"
" agr %0,%2\n" /* add alignement to bitnr */
addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
#endif
" lghi %2,63\n"
" nr %2,%0\n" /* make shift value */
" xr %0,%2\n"
" srlg %0,%0,3\n"
" lghi %3,-2\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" lghi %3,-2\n"
" rllg %3,%3,0(%2)\n" /* make AND mask */
" lg %0,0(%1)\n"
"0: lgr %2,%0\n" /* CS loop starts here */
" ngr %2,%3\n" /* clear bit */
" csg %0,%2,0(%1)\n"
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = ~(1UL << (nr & 63)); /* make AND mask */
asm volatile(
" lg %0,0(%4)\n"
"0: lgr %1,%0\n"
" ngr %1,%3\n"
" csg %0,%1,0(%4)\n"
" jl 0b"
: "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
: "cc", "memory" );
: "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
: "d" (mask), "a" (addr)
: "cc" );
}
/*
* SMP save change_bit routine based on compare and swap (CS)
*/
static __inline__ void change_bit_cs(unsigned long nr, volatile void * addr)
static inline void change_bit_cs(unsigned long nr, volatile void *ptr)
{
unsigned long bits, mask;
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lghi %2,7\n" /* CS must be aligned on 4 byte b. */
" ngr %2,%1\n" /* isolate last 2 bits of address */
" xgr %1,%2\n" /* make addr % 4 == 0 */
" sllg %2,%2,3\n"
" agr %0,%2\n" /* add alignement to bitnr */
addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
#endif
" lghi %2,63\n"
" nr %2,%0\n" /* make shift value */
" xr %0,%2\n"
" srlg %0,%0,3\n"
" lghi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" sllg %3,%3,0(%2)\n" /* make XR mask */
" lg %0,0(%1)\n"
"0: lgr %2,%0\n" /* CS loop starts here */
" xgr %2,%3\n" /* change bit */
" csg %0,%2,0(%1)\n"
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 63); /* make XOR mask */
asm volatile(
" lg %0,0(%4)\n"
"0: lgr %1,%0\n"
" xgr %1,%3\n"
" csg %0,%1,0(%4)\n"
" jl 0b"
: "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
: "cc", "memory" );
: "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
: "d" (mask), "a" (addr)
: "cc" );
}
/*
* SMP save test_and_set_bit routine based on compare and swap (CS)
*/
static __inline__ int
test_and_set_bit_cs(unsigned long nr, volatile void * addr)
static inline int
test_and_set_bit_cs(unsigned long nr, volatile void *ptr)
{
unsigned long bits, mask;
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lghi %2,7\n" /* CS must be aligned on 4 byte b. */
" ngr %2,%1\n" /* isolate last 2 bits of address */
" xgr %1,%2\n" /* make addr % 4 == 0 */
" sllg %2,%2,3\n"
" agr %0,%2\n" /* add alignement to bitnr */
addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
#endif
" lghi %2,63\n"
" nr %2,%0\n" /* make shift value */
" xr %0,%2\n"
" srlg %0,%0,3\n"
" lghi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" sllg %3,%3,0(%2)\n" /* make OR mask */
" lg %0,0(%1)\n"
"0: lgr %2,%0\n" /* CS loop starts here */
" ogr %2,%3\n" /* set bit */
" csg %0,%2,0(%1)\n"
" jl 0b\n"
" ngr %0,%3\n" /* isolate old bit */
: "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
: "cc", "memory" );
return nr != 0;
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 63); /* make OR/test mask */
asm volatile(
" lg %0,0(%4)\n"
"0: lgr %1,%0\n"
" ogr %1,%3\n"
" csg %0,%1,0(%4)\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
: "d" (mask), "a" (addr)
: "cc" );
return (old & mask) != 0;
}
/*
* SMP save test_and_clear_bit routine based on compare and swap (CS)
*/
static __inline__ int
test_and_clear_bit_cs(unsigned long nr, volatile void * addr)
static inline int
test_and_clear_bit_cs(unsigned long nr, volatile void *ptr)
{
unsigned long bits, mask;
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lghi %2,7\n" /* CS must be aligned on 4 byte b. */
" ngr %2,%1\n" /* isolate last 2 bits of address */
" xgr %1,%2\n" /* make addr % 4 == 0 */
" sllg %2,%2,3\n"
" agr %0,%2\n" /* add alignement to bitnr */
addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
#endif
" lghi %2,63\n"
" nr %2,%0\n" /* make shift value */
" xr %0,%2\n"
" srlg %0,%0,3\n"
" lghi %3,-2\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" rllg %3,%3,0(%2)\n" /* make AND mask */
" lg %0,0(%1)\n"
"0: lgr %2,%0\n" /* CS loop starts here */
" ngr %2,%3\n" /* clear bit */
" csg %0,%2,0(%1)\n"
" jl 0b\n"
" xgr %0,%2\n" /* isolate old bit */
: "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
: "cc", "memory" );
return nr != 0;
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = ~(1UL << (nr & 63)); /* make AND mask */
asm volatile(
" lg %0,0(%4)\n"
"0: lgr %1,%0\n"
" ngr %1,%3\n"
" csg %0,%1,0(%4)\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
: "d" (mask), "a" (addr)
: "cc" );
return (old ^ new) != 0;
}
/*
* SMP save test_and_change_bit routine based on compare and swap (CS)
*/
static __inline__ int
test_and_change_bit_cs(unsigned long nr, volatile void * addr)
static inline int
test_and_change_bit_cs(unsigned long nr, volatile void *ptr)
{
unsigned long bits, mask;
__asm__ __volatile__(
unsigned long addr, old, new, mask;
addr = (unsigned long) ptr;
#if ALIGN_CS == 1
" lghi %2,7\n" /* CS must be aligned on 4 byte b. */
" ngr %2,%1\n" /* isolate last 2 bits of address */
" xgr %1,%2\n" /* make addr % 4 == 0 */
" sllg %2,%2,3\n"
" agr %0,%2\n" /* add alignement to bitnr */
addr ^= addr & 7; /* align address to 8 */
nr += (addr & 7) << 3; /* add alignment to bit number */
#endif
" lghi %2,63\n"
" nr %2,%0\n" /* make shift value */
" xr %0,%2\n"
" srlg %0,%0,3\n"
" lghi %3,1\n"
" la %1,0(%0,%1)\n" /* calc. address for CS */
" sllg %3,%3,0(%2)\n" /* make OR mask */
" lg %0,0(%1)\n"
"0: lgr %2,%0\n" /* CS loop starts here */
" xgr %2,%3\n" /* change bit */
" csg %0,%2,0(%1)\n"
" jl 0b\n"
" ngr %0,%3\n" /* isolate old bit */
: "+a" (nr), "+a" (addr), "=a" (bits), "=d" (mask) :
: "cc", "memory" );
return nr != 0;
addr += (nr ^ (nr & 63)) >> 3; /* calculate address for CS */
mask = 1UL << (nr & 63); /* make XOR mask */
asm volatile(
" lg %0,0(%4)\n"
"0: lgr %1,%0\n"
" xgr %1,%3\n"
" csg %0,%1,0(%4)\n"
" jl 0b"
: "=&d" (old), "=&d" (new), "+m" (*(unsigned long *) addr)
: "d" (mask), "a" (addr)
: "cc" );
return (old & mask) != 0;
}
#endif /* CONFIG_SMP */
/*
* fast, non-SMP set_bit routine
*/
static __inline__ void __set_bit(unsigned long nr, volatile void * addr)
static inline void __set_bit(unsigned long nr, volatile void *ptr)
{
__asm__ __volatile__(
" lghi 2,56\n"
" lghi 1,7\n"
" xgr 2,%0\n"
" nr 1,%0\n"
" srlg 2,2,3\n"
" la 2,0(2,%1)\n"
" la 1,0(1,%2)\n"
" oc 0(1,2),0(1)"
: : "a" (nr), "a" (addr), "a" (&_oi_bitmap)
: "cc", "memory", "1", "2" );
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
asm volatile("oc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7))
: "cc" );
}
static __inline__ void
__constant_set_bit(const unsigned long nr, volatile void * addr)
static inline void
__constant_set_bit(const unsigned long nr, volatile void *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr >> 3) ^ 7);
switch (nr&7) {
case 0:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x01"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory");
asm volatile ("oi 0(%1),0x01"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 1:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x02"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x02"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 2:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x04"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x04"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 3:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x08"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x08"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 4:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x10"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x10"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 5:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x20"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x20"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 6:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x40"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x40"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 7:
__asm__ __volatile__ ("la 1,%0\n\t"
"oi 0(1),0x80"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("oi 0(%1),0x80"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
}
}
......@@ -325,73 +275,56 @@ __constant_set_bit(const unsigned long nr, volatile void * addr)
/*
* fast, non-SMP clear_bit routine
*/
static __inline__ void
__clear_bit(unsigned long nr, volatile void * addr)
static inline void
__clear_bit(unsigned long nr, volatile void *ptr)
{
__asm__ __volatile__(
" lghi 2,56\n"
" lghi 1,7\n"
" xgr 2,%0\n"
" nr 1,%0\n"
" srlg 2,2,3\n"
" la 2,0(2,%1)\n"
" la 1,0(1,%2)\n"
" nc 0(1,2),0(1)"
: : "d" (nr), "a" (addr), "a" (&_ni_bitmap)
: "cc", "memory", "1", "2" );
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
asm volatile("nc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_ni_bitmap + (nr & 7))
: "cc" );
}
static __inline__ void
__constant_clear_bit(const unsigned long nr, volatile void * addr)
static inline void
__constant_clear_bit(const unsigned long nr, volatile void *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr >> 3) ^ 7);
switch (nr&7) {
case 0:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xFE"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xFE"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 1:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xFD"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xFD"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 2:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xFB"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xFB"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 3:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xF7"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xF7"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 4:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xEF"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "cc", "memory" );
asm volatile ("ni 0(%1),0xEF"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 5:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xDF"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xDF"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 6:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0xBF"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0xBF"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 7:
__asm__ __volatile__ ("la 1,%0\n\t"
"ni 0(1),0x7F"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("ni 0(%1),0x7F"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
}
}
......@@ -404,72 +337,55 @@ __constant_clear_bit(const unsigned long nr, volatile void * addr)
/*
* fast, non-SMP change_bit routine
*/
static __inline__ void __change_bit(unsigned long nr, volatile void * addr)
static inline void __change_bit(unsigned long nr, volatile void *ptr)
{
__asm__ __volatile__(
" lghi 2,56\n"
" lghi 1,7\n"
" xgr 2,%0\n"
" nr 1,%0\n"
" srlg 2,2,3\n"
" la 2,0(2,%1)\n"
" la 1,0(1,%2)\n"
" xc 0(1,2),0(1)"
: : "d" (nr), "a" (addr), "a" (&_oi_bitmap)
: "cc", "memory", "1", "2" );
unsigned long addr;
addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
asm volatile("xc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7))
: "cc" );
}
static __inline__ void
__constant_change_bit(const unsigned long nr, volatile void * addr)
static inline void
__constant_change_bit(const unsigned long nr, volatile void *ptr)
{
unsigned long addr;
addr = ((unsigned long) ptr) + ((nr >> 3) ^ 7);
switch (nr&7) {
case 0:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x01"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x01"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 1:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x02"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x02"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 2:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x04"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x04"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 3:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x08"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x08"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 4:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x10"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "cc", "memory" );
asm volatile ("xi 0(%1),0x10"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 5:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x20"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("xi 0(%1),0x20"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 6:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x40"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("xi 0(%1),0x40"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
case 7:
__asm__ __volatile__ ("la 1,%0\n\t"
"xi 0(1),0x80"
: "=m" (*((volatile char *) addr + ((nr>>3)^7)))
: : "1", "cc", "memory" );
asm volatile ("xi 0(%1),0x80"
: "+m" (*(char *) addr) : "a" (addr) : "cc" );
break;
}
}
......@@ -482,74 +398,57 @@ __constant_change_bit(const unsigned long nr, volatile void * addr)
/*
* fast, non-SMP test_and_set_bit routine
*/
static __inline__ int
test_and_set_bit_simple(unsigned long nr, volatile void * addr)
static inline int
test_and_set_bit_simple(unsigned long nr, volatile void *ptr)
{
int oldbit;
__asm__ __volatile__(
" lghi 1,56\n"
" lghi 2,7\n"
" xgr 1,%1\n"
" nr 2,%1\n"
" srlg 1,1,3\n"
" la 1,0(1,%2)\n"
" ic %0,0(1)\n"
" srl %0,0(2)\n"
" la 2,0(2,%3)\n"
" oc 0(1,1),0(2)"
: "=&d" (oldbit) : "d" (nr), "a" (addr), "a" (&_oi_bitmap)
: "cc", "memory", "1", "2" );
return oldbit & 1;
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
ch = *(unsigned char *) addr;
asm volatile("oc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7))
: "cc" );
return (ch >> (nr & 7)) & 1;
}
#define __test_and_set_bit(X,Y) test_and_set_bit_simple(X,Y)
/*
* fast, non-SMP test_and_clear_bit routine
*/
static __inline__ int
test_and_clear_bit_simple(unsigned long nr, volatile void * addr)
static inline int
test_and_clear_bit_simple(unsigned long nr, volatile void *ptr)
{
int oldbit;
__asm__ __volatile__(
" lghi 1,56\n"
" lghi 2,7\n"
" xgr 1,%1\n"
" nr 2,%1\n"
" srlg 1,1,3\n"
" la 1,0(1,%2)\n"
" ic %0,0(1)\n"
" srl %0,0(2)\n"
" la 2,0(2,%3)\n"
" nc 0(1,1),0(2)"
: "=&d" (oldbit) : "d" (nr), "a" (addr), "a" (&_ni_bitmap)
: "cc", "memory", "1", "2" );
return oldbit & 1;
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
ch = *(unsigned char *) addr;
asm volatile("nc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_ni_bitmap + (nr & 7))
: "cc" );
return (ch >> (nr & 7)) & 1;
}
#define __test_and_clear_bit(X,Y) test_and_clear_bit_simple(X,Y)
/*
* fast, non-SMP test_and_change_bit routine
*/
static __inline__ int
test_and_change_bit_simple(unsigned long nr, volatile void * addr)
static inline int
test_and_change_bit_simple(unsigned long nr, volatile void *ptr)
{
int oldbit;
__asm__ __volatile__(
" lghi 1,56\n"
" lghi 2,7\n"
" xgr 1,%1\n"
" nr 2,%1\n"
" srlg 1,1,3\n"
" la 1,0(1,%2)\n"
" ic %0,0(1)\n"
" srl %0,0(2)\n"
" la 2,0(2,%3)\n"
" xc 0(1,1),0(2)"
: "=&d" (oldbit) : "d" (nr), "a" (addr), "a" (&_oi_bitmap)
: "cc", "memory", "1", "2" );
return oldbit & 1;
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
ch = *(unsigned char *) addr;
asm volatile("xc 0(1,%1),0(%2)"
: "+m" (*(char *) addr)
: "a" (addr), "a" (_oi_bitmap + (nr & 7))
: "cc" );
return (ch >> (nr & 7)) & 1;
}
#define __test_and_change_bit(X,Y) test_and_change_bit_simple(X,Y)
......@@ -574,25 +473,18 @@ test_and_change_bit_simple(unsigned long nr, volatile void * addr)
* This routine doesn't need to be atomic.
*/
static __inline__ int __test_bit(unsigned long nr, volatile void * addr)
static inline int __test_bit(unsigned long nr, volatile void *ptr)
{
int oldbit;
__asm__ __volatile__(
" lghi 2,56\n"
" lghi 1,7\n"
" xgr 2,%1\n"
" nr 1,%1\n"
" srlg 2,2,3\n"
" ic %0,0(2,%2)\n"
" srl %0,0(1)\n"
: "=&d" (oldbit) : "d" (nr), "a" (addr)
: "cc", "1", "2" );
return oldbit & 1;
unsigned long addr;
unsigned char ch;
addr = (unsigned long) ptr + ((nr ^ 56) >> 3);
ch = *(unsigned char *) addr;
return (ch >> (nr & 7)) & 1;
}
static __inline__ int
__constant_test_bit(unsigned long nr, volatile void * addr) {
/*
 * Test a single bit in the big-endian s390x bit layout: bit nr lives
 * in byte (nr >> 3) ^ 7 of its 64-bit word, at position nr & 7.
 * Non-atomic; evaluates at compile time for constant nr.
 */
static inline int
__constant_test_bit(unsigned long nr, volatile void *addr) {
	const volatile unsigned char *bytes = (const volatile unsigned char *) addr;
	unsigned char sel = bytes[(nr >> 3) ^ 7];
	return (sel >> (nr & 7)) & 1;
}
......@@ -604,53 +496,95 @@ __constant_test_bit(unsigned long nr, volatile void * addr) {
/*
* Find-bit routines..
*/
static __inline__ unsigned long
static inline unsigned long
find_first_zero_bit(void * addr, unsigned long size)
{
unsigned long res;
unsigned long res, cmp, count;
if (!size)
return 0;
__asm__(" lghi 0,-1\n"
" lgr 1,%1\n"
__asm__(" lghi %1,-1\n"
" lgr %2,%3\n"
" slgr %0,%0\n"
" aghi 1,63\n"
" srlg 1,1,6\n"
"0: cg 0,0(%0,%2)\n"
" aghi %2,63\n"
" srlg %2,%2,6\n"
"0: cg %1,0(%0,%4)\n"
" jne 1f\n"
" aghi %0,8\n"
" brct 1,0b\n"
" lgr %0,%1\n"
" brct %2,0b\n"
" lgr %0,%3\n"
" j 5f\n"
"1: lg 1,0(%0,%2)\n"
"1: lg %2,0(%0,%4)\n"
" sllg %0,%0,3\n"
" clr 1,0\n"
" clr %2,%1\n"
" jne 2f\n"
" aghi %0,32\n"
" srlg 1,1,32\n"
"2: lghi 0,0xff\n"
" tmll 1,0xffff\n"
" srlg %2,%2,32\n"
"2: lghi %1,0xff\n"
" tmll %2,0xffff\n"
" jno 3f\n"
" aghi %0,16\n"
" srl 1,16\n"
"3: tmll 1,0x00ff\n"
" srl %2,16\n"
"3: tmll %2,0x00ff\n"
" jno 4f\n"
" aghi %0,8\n"
" srl 1,8\n"
"4: ngr 1,0\n"
" ic 1,0(1,%3)\n"
" algr %0,1\n"
" srl %2,8\n"
"4: ngr %2,%1\n"
" ic %2,0(%2,%5)\n"
" algr %0,%2\n"
"5:"
: "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (addr), "a" (&_zb_findmap) : "cc" );
return (res < size) ? res : size;
}
static inline unsigned long
find_first_bit(void * addr, unsigned long size)
{
unsigned long res, cmp, count;
if (!size)
return 0;
__asm__(" slgr %1,%1\n"
" lgr %2,%3\n"
" slgr %0,%0\n"
" aghi %2,63\n"
" srlg %2,%2,6\n"
"0: cg %1,0(%0,%4)\n"
" jne 1f\n"
" aghi %0,8\n"
" brct %2,0b\n"
" lgr %0,%3\n"
" j 5f\n"
"1: lg %2,0(%0,%4)\n"
" sllg %0,%0,3\n"
" clr %2,%1\n"
" jne 2f\n"
" aghi %0,32\n"
" srlg %2,%2,32\n"
"2: lghi %1,0xff\n"
" tmll %2,0xffff\n"
" jnz 3f\n"
" aghi %0,16\n"
" srl %2,16\n"
"3: tmll %2,0x00ff\n"
" jnz 4f\n"
" aghi %0,8\n"
" srl %2,8\n"
"4: ngr %2,%1\n"
" ic %2,0(%2,%5)\n"
" algr %0,%2\n"
"5:"
: "=&a" (res) : "a" (size), "a" (addr), "a" (&_zb_findmap)
: "cc", "0", "1" );
: "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (addr), "a" (&_sb_findmap) : "cc" );
return (res < size) ? res : size;
}
static __inline__ unsigned long
static inline unsigned long
find_next_zero_bit (void * addr, unsigned long size, unsigned long offset)
{
unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
unsigned long bitvec;
unsigned long bitvec, reg;
unsigned long set, bit = offset & 63, res;
if (bit) {
......@@ -658,28 +592,26 @@ find_next_zero_bit (void * addr, unsigned long size, unsigned long offset)
* Look for zero in first word
*/
bitvec = (*p) >> bit;
__asm__(" lhi 0,-1\n"
" lgr 1,%1\n"
__asm__(" lhi %2,-1\n"
" slgr %0,%0\n"
" clr 1,0\n"
" clr %1,%2\n"
" jne 0f\n"
" aghi %0,32\n"
" srlg 1,1,32\n"
"0: lghi 0,0xff\n"
" tmll 1,0xffff\n"
" srlg %1,%1,32\n"
"0: lghi %2,0xff\n"
" tmll %1,0xffff\n"
" jno 1f\n"
" aghi %0,16\n"
" srlg 1,1,16\n"
"1: tmll 1,0x00ff\n"
" srlg %1,%1,16\n"
"1: tmll %1,0x00ff\n"
" jno 2f\n"
" aghi %0,8\n"
" srlg 1,1,8\n"
"2: ngr 1,0\n"
" ic 1,0(1,%2)\n"
" algr %0,1"
: "=&d" (set)
: "d" (bitvec), "a" (&_zb_findmap)
: "cc", "0", "1" );
" srlg %1,%1,8\n"
"2: ngr %1,%2\n"
" ic %1,0(%1,%3)\n"
" algr %0,%1"
: "=&d" (set), "+a" (bitvec), "=&d" (reg)
: "a" (&_zb_findmap) : "cc" );
if (set < (64 - bit))
return set + offset;
offset += 64 - bit;
......@@ -692,76 +624,187 @@ find_next_zero_bit (void * addr, unsigned long size, unsigned long offset)
return (offset + res);
}
/*
 * find_next_bit - find the next set bit in a bitmap
 * @addr: start of the bitmap
 * @size: size of the bitmap in bits
 * @offset: bit number to start searching at
 *
 * Returns the bit number of the first set bit >= offset, or a value
 * >= size if no further bit is set.  64-bit-word (s390x) version;
 * the asm is the same binary search used by __ffs below, finishing
 * with the _sb_findmap byte lookup table.
 */
static inline unsigned long
find_next_bit (void * addr, unsigned long size, unsigned long offset)
{
unsigned long * p = ((unsigned long *) addr) + (offset >> 6);
unsigned long bitvec, reg;
unsigned long set, bit = offset & 63, res;
if (bit) {
/*
* Look for a set bit in the first, partial word
*/
bitvec = (*p) >> bit;
__asm__(" slgr %0,%0\n" /* set = 0 */
" ltr %1,%1\n" /* any bit in the low 32 bits? */
" jnz 0f\n"
" aghi %0,32\n"
" srlg %1,%1,32\n"
"0: lghi %2,0xff\n"
" tmll %1,0xffff\n" /* any bit in the low 16 bits? */
" jnz 1f\n"
" aghi %0,16\n"
" srlg %1,%1,16\n"
"1: tmll %1,0x00ff\n" /* any bit in the low 8 bits? */
" jnz 2f\n"
" aghi %0,8\n"
" srlg %1,%1,8\n"
"2: ngr %1,%2\n" /* low byte indexes the */
" ic %1,0(%1,%3)\n" /* find-set-bit lookup table */
" algr %0,%1"
: "=&d" (set), "+a" (bitvec), "=&d" (reg)
: "a" (&_sb_findmap) : "cc" );
if (set < (64 - bit))
return set + offset;
offset += 64 - bit;
p++;
}
/*
* No set bit yet, search remaining full words for a bit
*/
res = find_first_bit (p, size - 64 * (p - (unsigned long *) addr));
return (offset + res);
}
/*
* ffz = Find First Zero in word. Undefined if no zero exists,
* so code should check against ~0UL first..
*/
static __inline__ unsigned long ffz(unsigned long word)
static inline unsigned long ffz(unsigned long word)
{
int result;
unsigned long reg, result;
__asm__(" lhi 0,-1\n"
" lgr 1,%1\n"
__asm__(" lhi %2,-1\n"
" slgr %0,%0\n"
" clr 1,0\n"
" clr %1,%2\n"
" jne 0f\n"
" aghi %0,32\n"
" srlg 1,1,32\n"
"0: lghi 0,0xff\n"
" tmll 1,0xffff\n"
" srlg %1,%1,32\n"
"0: lghi %2,0xff\n"
" tmll %1,0xffff\n"
" jno 1f\n"
" aghi %0,16\n"
" srlg 1,1,16\n"
"1: tmll 1,0x00ff\n"
" srlg %1,%1,16\n"
"1: tmll %1,0x00ff\n"
" jno 2f\n"
" aghi %0,8\n"
" srlg 1,1,8\n"
"2: ngr 1,0\n"
" ic 1,0(1,%2)\n"
" algr %0,1"
: "=&d" (result)
: "d" (word), "a" (&_zb_findmap)
: "cc", "0", "1" );
" srlg %1,%1,8\n"
"2: ngr %1,%2\n"
" ic %1,0(%1,%3)\n"
" algr %0,%1"
: "=&d" (result), "+a" (word), "=&d" (reg)
: "a" (&_zb_findmap) : "cc" );
return result;
}
/*
* __ffs = find first bit in word. Undefined if no bit exists,
* so code should check against 0UL first..
*/
/*
 * Returns the bit number (0..63) of the least-significant set bit in
 * word.  The result is undefined for word == 0; callers must check
 * against 0UL first (see the comment above).  Binary search over
 * halves via test-under-mask, finishing with the _sb_findmap byte
 * lookup table.
 */
static inline unsigned long __ffs (unsigned long word)
{
unsigned long reg, result;
__asm__(" slgr %0,%0\n" /* result = 0 */
" ltr %1,%1\n" /* any bit in the low 32 bits? */
" jnz 0f\n"
" aghi %0,32\n"
" srlg %1,%1,32\n"
"0: lghi %2,0xff\n"
" tmll %1,0xffff\n" /* any bit in the low 16 bits? */
" jnz 1f\n"
" aghi %0,16\n"
" srlg %1,%1,16\n"
"1: tmll %1,0x00ff\n" /* any bit in the low 8 bits? */
" jnz 2f\n"
" aghi %0,8\n"
" srlg %1,%1,8\n"
"2: ngr %1,%2\n" /* isolate the low byte and look */
" ic %1,0(%1,%3)\n" /* up its first set bit */
" algr %0,%1"
: "=&d" (result), "+a" (word), "=&d" (reg)
: "a" (&_sb_findmap) : "cc" );
return result;
}
/*
* Every architecture must define this function. It's the fastest
* way of searching a 140-bit bitmap where the first 100 bits are
* unlikely to be set. It's guaranteed that at least one of the 140
* bits is cleared.
*/
/*
 * Scheduler priority-bitmap search: the caller guarantees at least
 * one of the 140 bits is set, so a plain find_first_bit over the
 * fixed 140-bit size is sufficient here.
 */
static inline int sched_find_first_bit(unsigned long *b)
{
return find_first_bit(b, 140);
}
/*
* ffs: find first bit set. This is defined the same way as
* the libc and compiler builtin ffs routines, therefore
* differs in spirit from the above ffz (man ffs).
*/
extern int __inline__ ffs (int x)
extern int inline ffs (int x)
{
int r;
int r = 1;
if (x == 0)
return 0;
__asm__(" lr 1,%1\n"
" slr %0,%0\n"
" tml 1,0xFFFF\n"
__asm__(" tml %1,0xffff\n"
" jnz 0f\n"
" srl %1,16\n"
" ahi %0,16\n"
" srl 1,16\n"
"0: tml 1,0x00FF\n"
"0: tml %1,0x00ff\n"
" jnz 1f\n"
" srl %1,8\n"
" ahi %0,8\n"
" srl 1,8\n"
"1: tml 1,0x000F\n"
"1: tml %1,0x000f\n"
" jnz 2f\n"
" srl %1,4\n"
" ahi %0,4\n"
" srl 1,4\n"
"2: tml 1,0x0003\n"
"2: tml %1,0x0003\n"
" jnz 3f\n"
" srl %1,2\n"
" ahi %0,2\n"
" srl 1,2\n"
"3: tml 1,0x0001\n"
"3: tml %1,0x0001\n"
" jnz 4f\n"
" ahi %0,1\n"
"4:"
: "=&d" (r) : "d" (x) : "cc", "1" );
return r+1;
: "=&d" (r), "+d" (x) : : "cc" );
return r;
}
/*
* fls: find last bit set.
*/
/*
 * fls - find last (most-significant) set bit in a 32-bit word.
 *
 * Same convention as libc ffs mirrored from the top:
 * fls(0) = 0, fls(1) = 1, fls(0x80000000) = 32.
 *
 * Binary search from the high half downwards: each "tmh" tests a
 * group of high-order bits and, when the group is all zero, the
 * value is shifted up and the candidate position reduced.  The
 * branches must be "jnz" (skip the shift when a bit IS present in
 * the tested group); the previous "jz" branches inverted every step,
 * so e.g. fls(1) came out as 32 and fls(0x80000000) as 16.  The
 * mask/branch pattern now mirrors __ffs/ffs above.
 */
extern __inline__ int fls(int x)
{
	int r = 32;
	if (x == 0)
		return 0;
	__asm__(" tmh %1,0xffff\n"	/* any bit in bits 31..16? */
		" jnz 0f\n"
		" sll %1,16\n"
		" ahi %0,-16\n"
		"0: tmh %1,0xff00\n"	/* any bit in bits 31..24? */
		" jnz 1f\n"
		" sll %1,8\n"
		" ahi %0,-8\n"
		"1: tmh %1,0xf000\n"	/* any bit in bits 31..28? */
		" jnz 2f\n"
		" sll %1,4\n"
		" ahi %0,-4\n"
		"2: tmh %1,0xc000\n"	/* any bit in bits 31..30? */
		" jnz 3f\n"
		" sll %1,2\n"
		" ahi %0,-2\n"
		"3: tmh %1,0x8000\n"	/* top bit set? */
		" jnz 4f\n"
		" ahi %0,-1\n"
		"4:"
		: "+d" (r), "+d" (x) : : "cc" );
	return r;
}
/*
......@@ -789,54 +832,54 @@ extern int __inline__ ffs (int x)
#define ext2_set_bit(nr, addr) test_and_set_bit((nr)^56, addr)
#define ext2_clear_bit(nr, addr) test_and_clear_bit((nr)^56, addr)
#define ext2_test_bit(nr, addr) test_bit((nr)^56, addr)
static __inline__ unsigned long
static inline unsigned long
ext2_find_first_zero_bit(void *vaddr, unsigned long size)
{
unsigned long res;
unsigned long res, cmp, count;
if (!size)
return 0;
__asm__(" lghi 0,-1\n"
" lgr 1,%1\n"
" aghi 1,63\n"
" srlg 1,1,6\n"
__asm__(" lghi %1,-1\n"
" lgr %2,%3\n"
" aghi %2,63\n"
" srlg %2,%2,6\n"
" slgr %0,%0\n"
"0: clg 0,0(%0,%2)\n"
"0: clg %1,0(%0,%4)\n"
" jne 1f\n"
" aghi %0,8\n"
" brct 1,0b\n"
" lgr %0,%1\n"
" brct %2,0b\n"
" lgr %0,%3\n"
" j 5f\n"
"1: cl 0,0(%0,%2)\n"
"1: cl %1,0(%0,%4)\n"
" jne 2f\n"
" aghi %0,4\n"
"2: l 1,0(%0,%2)\n"
"2: l %2,0(%0,%4)\n"
" sllg %0,%0,3\n"
" aghi %0,24\n"
" lghi 0,0xff\n"
" tmlh 1,0xffff\n"
" lghi %1,0xff\n"
" tmlh %2,0xffff\n"
" jo 3f\n"
" aghi %0,-16\n"
" srl 1,16\n"
"3: tmll 1,0xff00\n"
" srl %2,16\n"
"3: tmll %2,0xff00\n"
" jo 4f\n"
" aghi %0,-8\n"
" srl 1,8\n"
"4: ngr 1,0\n"
" ic 1,0(1,%3)\n"
" algr %0,1\n"
" srl %2,8\n"
"4: ngr %2,%1\n"
" ic %2,0(%2,%5)\n"
" algr %0,%2\n"
"5:"
: "=&a" (res) : "a" (size), "a" (vaddr), "a" (&_zb_findmap)
: "cc", "0", "1" );
: "=&a" (res), "=&d" (cmp), "=&a" (count)
: "a" (size), "a" (vaddr), "a" (&_zb_findmap) : "cc" );
return (res < size) ? res : size;
}
static __inline__ unsigned long
static inline unsigned long
ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
{
unsigned long *addr = vaddr;
unsigned long *p = addr + (offset >> 6);
unsigned long word;
unsigned long word, reg;
unsigned long bit = offset & 63UL, res;
if (offset >= size)
......@@ -848,30 +891,28 @@ ext2_find_next_zero_bit(void *vaddr, unsigned long size, unsigned long offset)
word >>= bit;
res = bit;
/* Look for zero in first 8 byte word */
__asm__(" lgr 1,%1\n"
" lghi 0,0xff\n"
" tmll 1,0xffff\n"
__asm__(" lghi %2,0xff\n"
" tmll %1,0xffff\n"
" jno 2f\n"
" ahi %0,16\n"
" srlg 1,1,16\n"
"0: tmll 1,0xffff\n"
" srlg %1,%1,16\n"
"0: tmll %1,0xffff\n"
" jno 2f\n"
" ahi %0,16\n"
" srlg 1,1,16\n"
"1: tmll 1,0xffff\n"
" srlg %1,%1,16\n"
"1: tmll %1,0xffff\n"
" jno 2f\n"
" ahi %0,16\n"
" srl 1,16\n"
"2: tmll 1,0x00ff\n"
" srl %1,16\n"
"2: tmll %1,0x00ff\n"
" jno 3f\n"
" ahi %0,8\n"
" srl 1,8\n"
"3: ngr 1,0\n"
" ic 1,0(1,%2)\n"
" alr %0,1"
: "+&d" (res)
: "d" (word), "a" (&_zb_findmap)
: "cc", "0", "1" );
" srl %1,8\n"
"3: ngr %1,%2\n"
" ic %1,0(%1,%3)\n"
" alr %0,%1"
: "+&d" (res), "+a" (word), "=&d" (reg)
: "a" (&_zb_findmap) : "cc" );
if (res < 64)
return (p - addr)*64 + res;
p++;
......
......@@ -13,88 +13,74 @@
#ifdef __GNUC__
/* Byte-swap a 64-bit value loaded from memory (LRVG: load reversed). */
static __inline__ __const__ __u64 ___arch__swab64p(__u64 *x)
{
	__u64 result;

	__asm__ __volatile__ (
		"   lrvg %0,%1"
		: "=d" (result) : "m" (*x) );
	return result;
}

/* Byte-swap a 64-bit value held in a register (LRVGR). */
static __inline__ __const__ __u64 ___arch__swab64(__u64 x)
{
	__u64 result;

	__asm__ __volatile__ (
		"   lrvgr %0,%1"
		: "=d" (result) : "d" (x) );
	return result;
}

/* Byte-swap a 64-bit value in place. */
static __inline__ void ___arch__swab64s(__u64 *x)
{
	*x = ___arch__swab64p(x);
}

/* Byte-swap a 32-bit value loaded from memory (LRV). */
static __inline__ __const__ __u32 ___arch__swab32p(__u32 *x)
{
	__u32 result;

	__asm__ __volatile__ (
		"   lrv %0,%1"
		: "=d" (result) : "m" (*x) );
	return result;
}

/* Byte-swap a 32-bit value held in a register (LRVR). */
static __inline__ __const__ __u32 ___arch__swab32(__u32 x)
{
	__u32 result;

	__asm__ __volatile__ (
		"   lrvr %0,%1"
		: "=d" (result) : "d" (x) );
	return result;
}

/* Byte-swap a 32-bit value in place. */
static __inline__ void ___arch__swab32s(__u32 *x)
{
	*x = ___arch__swab32p(x);
}

/* Byte-swap a 16-bit value loaded from memory (LRVH). */
static __inline__ __const__ __u16 ___arch__swab16p(__u16 *x)
{
	__u16 result;

	__asm__ __volatile__ (
		"   lrvh %0,%1"
		: "=d" (result) : "m" (*x) );
	return result;
}

/* Byte-swap a 16-bit value; routed through the memory form since the
 * load-reversed instructions here take storage operands for 16 bits. */
static __inline__ __const__ __u16 ___arch__swab16(__u16 x)
{
	return ___arch__swab16p(&x);
}

/* Byte-swap a 16-bit value in place. */
static __inline__ void ___arch__swab16s(__u16 *x)
{
	*x = ___arch__swab16p(x);
}
#define __arch__swab64(x) ___arch__swab64(x)
......
#ifndef _S390X_CACHEFLUSH_H
#define _S390X_CACHEFLUSH_H

/* Keep includes the same across arches. */
#include <linux/mm.h>

/* Caches aren't brain-dead on the s390: the architecture keeps them
 * coherent, so every cache flush hook is a no-op here.  The empty
 * do/while(0) bodies keep the macros statement-safe. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)

#endif /* _S390X_CACHEFLUSH_H */
/*
* File...........: linux/include/asm-s390/ccwcache.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000
*/
#ifndef CCWCACHE_H
#define CCWCACHE_H
#include <linux/slab.h>
#include <asm/irq.h>
#ifndef __KERNEL__
#define kmem_cache_t void
#endif /* __KERNEL__ */
/*
 * A channel program request as managed by the ccw cache.
 * Allocated from one of the size-binned slab caches (see
 * CCW_NUMBER_CACHES below); "cache" records the owning slab.
 */
typedef struct ccw_req_t {
	/* eye catcher plus queueing information */
	unsigned int magic;
	struct ccw_req_t *next;		/* pointer to next ccw_req_t in queue */
	struct ccw_req_t *int_next;	/* for internal queueing */
	struct ccw_req_t *int_prev;	/* for internal queueing */

	/* Where to execute what... */
	void *device;			/* index of the device the req is for */
	void *req;			/* pointer to originating request */
	ccw1_t *cpaddr;			/* address of channel program */
	char status;			/* reflecting the status of this request */
	char flags;			/* see below */
	short retries;			/* A retry counter to be set when filling */

	/* ... and how */
	int options;			/* options for execution */
	char lpm;			/* logical path mask */
	void *data;			/* pointer to data area */
	devstat_t *dstat;		/* The device status in case of an error */

	/* these are important for recovering erroneous requests */
	struct ccw_req_t *refers;	/* Does this request refer to another one? */
	void *function;			/* refers to the originating ERP action */
	unsigned long long expires;	/* expiration period */

	/* these are for profiling purposes */
	unsigned long long buildclk;	/* TOD-clock of request generation */
	unsigned long long startclk;	/* TOD-clock of request start */
	unsigned long long stopclk;	/* TOD-clock of request interrupt */
	unsigned long long endclk;	/* TOD-clock of request termination */

	/* these are for internal use */
	int cplength;			/* length of the channel program in CCWs */
	int datasize;			/* amount of additional data in bytes */
	kmem_cache_t *cache;		/* the cache this data comes from */
} __attribute__ ((aligned(4))) ccw_req_t;
/*
* ccw_req_t -> status can be:
*/
#define CQR_STATUS_EMPTY 0x00 /* request is empty */
#define CQR_STATUS_FILLED 0x01 /* request is ready to be preocessed */
#define CQR_STATUS_QUEUED 0x02 /* request is queued to be processed */
#define CQR_STATUS_IN_IO 0x03 /* request is currently in IO */
#define CQR_STATUS_DONE 0x04 /* request is completed successfully */
#define CQR_STATUS_ERROR 0x05 /* request is completed with error */
#define CQR_STATUS_FAILED 0x06 /* request is finally failed */
#define CQR_STATUS_PENDING 0x07 /* request is waiting for interrupt - ERP only */
#define CQR_FLAGS_CHAINED 0x01 /* request is chained by another (last CCW is TIC) */
#ifdef __KERNEL__
#define SMALLEST_SLAB (sizeof(struct ccw_req_t) <= 128 ? 128 :\
sizeof(struct ccw_req_t) <= 256 ? 256 : 512 )
/* SMALLEST_SLAB(1),... PAGE_SIZE(CCW_NUMBER_CACHES) */
#define CCW_NUMBER_CACHES (sizeof(struct ccw_req_t) <= 128 ? 6 :\
sizeof(struct ccw_req_t) <= 256 ? 5 : 4 )
int ccwcache_init (void);
ccw_req_t *ccw_alloc_request (char *magic, int cplength, int additional_data);
void ccw_free_request (ccw_req_t * request);
#endif /* __KERNEL__ */
#endif /* CCWCACHE_H */
......@@ -13,16 +13,14 @@
#ifdef __KERNEL__
#include <asm/thread_info.h>
struct task_struct;
static inline struct task_struct * get_current(void)
{
struct task_struct *current;
__asm__("lghi %0,-16384\n\t"
"alg %0,0xd40"
: "=&r" (current) : : "cc" );
return current;
}
return current_thread_info()->task;
}
#define current get_current()
......
......@@ -10,6 +10,10 @@
*
* History of changes (starts July 2000)
* 05/04/01 created by moving the kernel interface to drivers/s390/block/dasd_int.h
* 12/06/01 DASD_API_VERSION 2 - binary compatible to 0 (new BIODASDINFO2)
* 01/23/02 DASD_API_VERSION 3 - added BIODASDPSRD (and BIODASDENAPAV) IOCTL
* 02/15/02 DASD_API_VERSION 4 - added BIODASDSATTR IOCTL
*
*/
#ifndef DASD_H
......@@ -18,10 +22,125 @@
#define DASD_IOCTL_LETTER 'D'
#if (DASD_API_VERSION == 0)
#define DASD_API_VERSION 4
/*
 * struct dasd_information2_t
 * represents any data about the device, which is visible to userspace.
 * including format and features.
 * Extended variant of dasd_information_t (returned by BIODASDINFO2);
 * layout is part of the user/kernel ioctl ABI.
 */
typedef struct dasd_information2_t {
	unsigned int devno;		/* S/390 devno */
	unsigned int real_devno;	/* for aliases */
	unsigned int schid;		/* S/390 subchannel identifier */
	unsigned int cu_type : 16;	/* from SenseID */
	unsigned int cu_model : 8;	/* from SenseID */
	unsigned int dev_type : 16;	/* from SenseID */
	unsigned int dev_model : 8;	/* from SenseID */
	unsigned int open_count;
	unsigned int req_queue_len;
	unsigned int chanq_len;		/* length of chanq */
	char type[4];			/* from discipline.name, 'none' for unknown */
	unsigned int status;		/* current device level */
	unsigned int label_block;	/* where to find the VOLSER */
	unsigned int FBA_layout;	/* fixed block size (like AIXVOL) */
	unsigned int characteristics_size;
	unsigned int confdata_size;
	char characteristics[64];	/* from read_device_characteristics */
	char configuration_data[256];	/* from read_configuration_data */
	unsigned int format;		/* format info like formatted/cdl/ldl/... */
	unsigned int features;		/* dasd features like 'ro',... */
	unsigned int reserved0;		/* reserved for further use ,... */
	unsigned int reserved1;		/* reserved for further use ,... */
	unsigned int reserved2;		/* reserved for further use ,... */
	unsigned int reserved3;		/* reserved for further use ,... */
	unsigned int reserved4;		/* reserved for further use ,... */
	unsigned int reserved5;		/* reserved for further use ,... */
	unsigned int reserved6;		/* reserved for further use ,... */
	unsigned int reserved7;		/* reserved for further use ,... */
} dasd_information2_t;
/*
* values to be used for dasd_information_t.format
* 0x00: NOT formatted
* 0x01: Linux disc layout
* 0x02: Common disc layout
*/
#define DASD_FORMAT_NONE 0
#define DASD_FORMAT_LDL 1
#define DASD_FORMAT_CDL 2
/*
* values to be used for dasd_information_t.features
* 0x00: default features
* 0x01: readonly (ro)
*/
#define DASD_FEATURE_DEFAULT 0
#define DASD_FEATURE_READONLY 1
#define DASD_PARTN_BITS 2
/*
 * struct dasd_information_t
 * represents any data about the device, which is visible to userspace.
 * Original (version 0) variant returned by BIODASDINFO; the first
 * fields of dasd_information2_t mirror this layout (ioctl ABI).
 */
typedef struct dasd_information_t {
	unsigned int devno;		/* S/390 devno */
	unsigned int real_devno;	/* for aliases */
	unsigned int schid;		/* S/390 subchannel identifier */
	unsigned int cu_type : 16;	/* from SenseID */
	unsigned int cu_model : 8;	/* from SenseID */
	unsigned int dev_type : 16;	/* from SenseID */
	unsigned int dev_model : 8;	/* from SenseID */
	unsigned int open_count;
	unsigned int req_queue_len;
	unsigned int chanq_len;		/* length of chanq */
	char type[4];			/* from discipline.name, 'none' for unknown */
	unsigned int status;		/* current device level */
	unsigned int label_block;	/* where to find the VOLSER */
	unsigned int FBA_layout;	/* fixed block size (like AIXVOL) */
	unsigned int characteristics_size;
	unsigned int confdata_size;
	char characteristics[64];	/* from read_device_characteristics */
	char configuration_data[256];	/* from read_configuration_data */
} dasd_information_t;
/*
 * Read Subsystem Data - Performance Statistics
 * Returned by the BIODASDPSRD ioctl; the packed layout is part of the
 * user/kernel ABI, so the misspelled member names (nr_bybass_cache,
 * reseved2) must be kept as-is for source compatibility.
 */
typedef struct dasd_rssd_perf_stats_t {
	unsigned char invalid:1;
	unsigned char format:3;
	unsigned char data_format:4;
	unsigned char unit_address;
	unsigned short device_status;
	unsigned int nr_read_normal;
	unsigned int nr_read_normal_hits;
	unsigned int nr_write_normal;
	unsigned int nr_write_fast_normal_hits;
	unsigned int nr_read_seq;
	unsigned int nr_read_seq_hits;
	unsigned int nr_write_seq;
	unsigned int nr_write_fast_seq_hits;
	unsigned int nr_read_cache;
	unsigned int nr_read_cache_hits;
	unsigned int nr_write_cache;
	unsigned int nr_write_fast_cache_hits;
	unsigned int nr_inhibit_cache;
	unsigned int nr_bybass_cache;		/* sic: bypass */
	unsigned int nr_seq_dasd_to_cache;
	unsigned int nr_dasd_to_cache;
	unsigned int nr_cache_to_dasd;
	unsigned int nr_delayed_fast_write;
	unsigned int nr_normal_fast_write;
	unsigned int nr_seq_fast_write;
	unsigned int nr_cache_miss;
	unsigned char status2;
	unsigned int nr_quick_write_promotes;
	unsigned char reserved;
	unsigned short ssid;
	unsigned char reseved2[96];		/* sic: reserved */
} __attribute__((packed)) dasd_rssd_perf_stats_t;
/*
* struct profile_info_t
* holds the profinling information
......@@ -62,30 +181,36 @@ typedef struct format_data_t {
#define DASD_FMT_INT_INVAL 4 /* invalidate tracks */
#define DASD_FMT_INT_COMPAT 8 /* use OS/390 compatible disk layout */
/*
* struct dasd_information_t
* represents any data about the data, which is visible to userspace
* struct attrib_data_t
* represents the operation (cache) bits for the device.
* Used in DE to influence caching of the DASD.
*/
typedef struct dasd_information_t {
unsigned int devno; /* S/390 devno */
unsigned int real_devno; /* for aliases */
unsigned int schid; /* S/390 subchannel identifier */
unsigned int cu_type : 16; /* from SenseID */
unsigned int cu_model : 8; /* from SenseID */
unsigned int dev_type : 16; /* from SenseID */
unsigned int dev_model : 8; /* from SenseID */
unsigned int open_count;
unsigned int req_queue_len;
unsigned int chanq_len;
char type[4]; /* from discipline.name, 'none' for unknown */
unsigned int status; /* current device level */
unsigned int label_block; /* where to find the VOLSER */
unsigned int FBA_layout; /* fixed block size (like AIXVOL) */
unsigned int characteristics_size;
unsigned int confdata_size;
char characteristics[64]; /* from read_device_characteristics */
char configuration_data[256]; /* from read_configuration_data */
} dasd_information_t;
typedef struct attrib_data_t {
	unsigned char operation:3;	/* cache operation mode */
	unsigned char reserved:5;	/* unused, pads the operation bits */
	__u16 nr_cyl;			/* no of cylinders for read ahead */
	__u8 reserved2[29];		/* for future use */
} __attribute__ ((packed)) attrib_data_t;
/* definition of operation (cache) bits within attributes of DE */
#define DASD_NORMAL_CACHE 0x0
#define DASD_BYPASS_CACHE 0x1
#define DASD_INHIBIT_LOAD 0x2
#define DASD_SEQ_ACCESS 0x3
#define DASD_SEQ_PRESTAGE 0x4
#define DASD_REC_ACCESS 0x5
/********************************************************************************
* SECTION: Definition of IOCTLs
*
* Here ist how the ioctl-nr should be used:
* 0 - 31 DASD driver itself
* 32 - 239 still open
* 240 - 255 reserved for EMC
*******************************************************************************/
/* Disable the volume (for Linux) */
#define BIODASDDISABLE _IO(DASD_IOCTL_LETTER,0)
......@@ -97,15 +222,28 @@ typedef struct dasd_information_t {
#define BIODASDSLCK _IO(DASD_IOCTL_LETTER,4) /* steal lock */
/* reset profiling information of a device */
#define BIODASDPRRST _IO(DASD_IOCTL_LETTER,5)
/* enable PAV */
#define BIODASDENAPAV _IO(DASD_IOCTL_LETTER,6)
/* retrieve API version number */
#define DASDAPIVER _IOR(DASD_IOCTL_LETTER,0,int)
/* Get information on a dasd device */
#define BIODASDINFO _IOR(DASD_IOCTL_LETTER,1,dasd_information_t)
/* retrieve profiling information of a device */
#define BIODASDPRRD _IOR(DASD_IOCTL_LETTER,2,dasd_profile_info_t)
/* Get information on a dasd device (enhanced) */
#define BIODASDINFO2 _IOR(DASD_IOCTL_LETTER,3,dasd_information2_t)
/* Performance Statistics Read */
#define BIODASDPSRD _IOR(DASD_IOCTL_LETTER,4,dasd_rssd_perf_stats_t)
/* #define BIODASDFORMAT _IOW(IOCTL_LETTER,0,format_data_t) , deprecated */
#define BIODASDFMT _IOW(DASD_IOCTL_LETTER,1,format_data_t)
#endif /* DASD_API_VERSION */
/* Set Attributes (cache operations) */
#define BIODASDSATTR _IOW(DASD_IOCTL_LETTER,2,attrib_data_t)
#endif /* DASD_H */
/*
......
......@@ -54,7 +54,7 @@ struct __debug_entry{
#define DEBUG_DATA(entry) (char*)(entry + 1) /* data is stored behind */
/* the entry information */
/*
 * Store the TOD clock into x.  STCK takes a storage operand, so the
 * address of x is handed in through a base register ("a" (&(x)));
 * the "=m" (x) output tells the compiler that x is written.
 */
#define STCK(x) asm volatile ("STCK 0(%1)" : "=m" (x) : "a" (&(x)) : "cc")
typedef struct __debug_entry debug_entry_t;
......
......@@ -24,7 +24,7 @@ extern __u8 _ebc_toupper[]; /* EBCDIC -> uppercase */
extern __inline__ void
codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
{
if (nr <= 0)
if (nr-- <= 0)
return;
__asm__ __volatile__(
" bras 1,1f\n"
......@@ -34,7 +34,7 @@ codepage_convert(const __u8 *codepage, volatile __u8 * addr, unsigned long nr)
"1: ahi %1,-256\n"
" jp 0b\n"
" ex %1,0(1)"
: "+&a" (addr), "+&a" (nr-1)
: "+&a" (addr), "+&a" (nr)
: "a" (codepage) : "cc", "memory", "1" );
}
......
......@@ -77,7 +77,13 @@ typedef s390_regs elf_gregset_t;
#define ELF_PLATFORM (NULL)
#ifdef __KERNEL__
/*
 * Select the execution personality for a new ELF image: iBCS2
 * binaries get PER_SVR4; otherwise an already-set PER_LINUX32
 * (31-bit emulation) is preserved and anything else falls back
 * to PER_LINUX.
 */
#define SET_PERSONALITY(ex, ibcs2) \
do { \
	if (ibcs2) \
		set_personality(PER_SVR4); \
	else if (current->personality != PER_LINUX32) \
		set_personality(PER_LINUX); \
} while (0)
#endif
#endif
/*
* header file for FCP adapter driver for IBM eServer zSeries
*
* Authors:
* Martin Peschke <mpeschke@de.ibm.com>
* Raimund Schroeder <raimund.schroeder@de.ibm.com>
* Aron Zeh <arzeh@de.ibm.com>
* Wolfgang Taphorn <taphorn@de.ibm.com>
*
* Copyright (C) 2002 IBM Entwicklung GmbH, IBM Corporation
*/
#ifndef FSF_H
#define FSF_H
#define FSF_QTCB_VERSION1 0x00000001
#define FSF_QTCB_CURRENT_VERSION FSF_QTCB_VERSION1
/* FSF commands */
#define FSF_QTCB_FCP_CMND 0x00000001
#define FSF_QTCB_ABORT_FCP_CMND 0x00000002
#define FSF_QTCB_OPEN_PORT_WITH_DID 0x00000005
#define FSF_QTCB_OPEN_LUN 0x00000006
#define FSF_QTCB_CLOSE_LUN 0x00000007
#define FSF_QTCB_CLOSE_PORT 0x00000008
#define FSF_QTCB_CLOSE_PHYSICAL_PORT 0x00000009
#define FSF_QTCB_SEND_ELS 0x0000000B
#define FSF_QTCB_SEND_GENERIC 0x0000000C
#define FSF_QTCB_EXCHANGE_CONFIG_DATA 0x0000000D
/* FSF QTCB types */
#define FSF_IO_COMMAND 0x00000001
#define FSF_SUPPORT_COMMAND 0x00000002
#define FSF_CONFIG_COMMAND 0x00000003
/* association between FSF command and FSF QTCB type */
/*
 * NOTE(review): this is an array *definition* living in a header;
 * without "static", every translation unit that includes fsf.h emits
 * a global fsf_qtcb_type and the copies collide at link time.  Made
 * static so each includer gets its own (identical) private copy.
 */
static u32 fsf_qtcb_type[] = {
	[ FSF_QTCB_FCP_CMND ]             = FSF_IO_COMMAND,
	[ FSF_QTCB_ABORT_FCP_CMND ]       = FSF_SUPPORT_COMMAND,
	[ FSF_QTCB_OPEN_PORT_WITH_DID ]   = FSF_SUPPORT_COMMAND,
	[ FSF_QTCB_OPEN_LUN ]             = FSF_SUPPORT_COMMAND,
	[ FSF_QTCB_CLOSE_LUN ]            = FSF_SUPPORT_COMMAND,
	[ FSF_QTCB_CLOSE_PORT ]           = FSF_SUPPORT_COMMAND,
	[ FSF_QTCB_CLOSE_PHYSICAL_PORT ]  = FSF_SUPPORT_COMMAND,
	[ FSF_QTCB_SEND_ELS ]             = FSF_SUPPORT_COMMAND,
	[ FSF_QTCB_SEND_GENERIC ]         = FSF_SUPPORT_COMMAND,
	[ FSF_QTCB_EXCHANGE_CONFIG_DATA ] = FSF_CONFIG_COMMAND
};
/* FSF protocol stati */
#define FSF_PROT_GOOD 0x00000001
#define FSF_PROT_QTCB_VERSION_ERROR 0x00000010
#define FSF_PROT_SEQ_NUMB_ERROR 0x00000020
#define FSF_PROT_UNSUPP_QTCB_TYPE 0x00000040
#define FSF_PROT_HOST_CONNECTION_INITIALIZING 0x00000080
#define FSF_PROT_FSF_STATUS_PRESENTED 0x00000100
#define FSF_PROT_DUPLICATE_REQUEST_ID 0x00000200
#define FSF_PROT_LINK_DOWN 0x00000400
#define FSF_PROT_REEST_QUEUE 0x00000800
#define FSF_PROT_ERROR_STATE 0x01000000
/* FSF stati */
#define FSF_GOOD 0x00000000
#define FSF_PORT_ALREADY_OPEN 0x00000001
#define FSF_LUN_ALREADY_OPEN 0x00000002
#define FSF_PORT_HANDLE_NOT_VALID 0x00000003
#define FSF_LUN_HANDLE_NOT_VALID 0x00000004
#define FSF_HANDLE_MISMATCH 0x00000005
#define FSF_SERVICE_CLASS_NOT_SUPPORTED 0x00000006
#define FSF_FCPLUN_NOT_VALID 0x00000009
//#define FSF_ACCESS_DENIED 0x00000010
#define FSF_ACCESS_TYPE_NOT_VALID 0x00000011
#define FSF_LUN_IN_USE 0x00000012
#define FSF_COMMAND_ABORTED_ULP 0x00000020
#define FSF_COMMAND_ABORTED_ADAPTER 0x00000021
#define FSF_FCP_COMMAND_DOES_NOT_EXIST 0x00000022
#define FSF_DIRECTION_INDICATOR_NOT_VALID 0x00000030
#define FSF_INBOUND_DATA_LENGTH_NOT_VALID 0x00000031 /* FIXME: obsolete? */
#define FSF_OUTBOUND_DATA_LENGTH_NOT_VALID 0x00000032 /* FIXME: obsolete? */
#define FSF_CMND_LENGTH_NOT_VALID 0x00000033
#define FSF_MAXIMUM_NUMBER_OF_PORTS_EXCEEDED 0x00000040
#define FSF_MAXIMUM_NUMBER_OF_LUNS_EXCEEDED 0x00000041
#define FSF_REQUEST_BUF_NOT_VALID 0x00000042
#define FSF_RESPONSE_BUF_NOT_VALID 0x00000043
#define FSF_ELS_COMMAND_REJECTED 0x00000050
#define FSF_GENERIC_COMMAND_REJECTED 0x00000051
//#define FSF_AUTHORIZATION_FAILURE 0x00000053
#define FSF_PORT_BOXED 0x00000059
//#define FSF_LUN_BOXED 0x0000005A
#define FSF_ADAPTER_STATUS_AVAILABLE 0x000000AD
#define FSF_FCP_RSP_AVAILABLE 0x000000AF
#define FSF_UNKNOWN_COMMAND 0x000000E2
//#define FSF_ERROR 0x000000FF
/* FSF status qualifier, recommendations */
#define FSF_SQ_NO_RECOM 0x00
#define FSF_SQ_FCP_RSP_AVAILABLE 0x01
#define FSF_SQ_RETRY_IF_POSSIBLE 0x02
#define FSF_SQ_ULP_DEPENDENT_ERP_REQUIRED 0x03
#define FSF_SQ_INVOKE_LINK_TEST_PROCEDURE 0x04
#define FSF_SQ_ULP_PROGRAMMING_ERROR 0x05
#define FSF_SQ_COMMAND_ABORTED 0x06
#define FSF_SQ_NO_RETRY_POSSIBLE 0x07
/* FSF status qualifier (most significant 4 bytes), local link down */
#define FSF_PSQ_LINK_NOLIGHT 0x00000004
#define FSF_PSQ_LINK_WRAPPLUG 0x00000008
#define FSF_PSQ_LINK_NOFCP 0x00000010
/* payload size in status read buffer */
#define FSF_STATUS_READ_PAYLOAD_SIZE 4032
/* status types in status read buffer */
#define FSF_STATUS_READ_PORT_CLOSED 0x00000001
#define FSF_STATUS_READ_INCOMING_ELS 0x00000002
#define FSF_STATUS_READ_BIT_ERROR_THRESHOLD 0x00000004
#define FSF_STATUS_READ_LINK_DOWN 0x00000005 /* FIXME: really? */
#define FSF_STATUS_READ_LINK_UP 0x00000006
/* status subtypes in status read buffer */
#define FSF_STATUS_READ_SUB_CLOSE_PHYS_PORT 0x00000001
#define FSF_STATUS_READ_SUB_ERROR_PORT 0x00000002
/* topologie that is detected by the adapter */
#define FSF_TOPO_ERROR 0x00000000
#define FSF_TOPO_P2P 0x00000001
#define FSF_TOPO_FABRIC 0x00000002
#define FSF_TOPO_AL 0x00000003
#define FSF_TOPO_FABRIC_VIRT 0x00000004
/* data direction for FCP commands */
#define FSF_DATADIR_WRITE 0x00000001
#define FSF_DATADIR_READ 0x00000002
#define FSF_DATADIR_READ_WRITE 0x00000003
#define FSF_DATADIR_CMND 0x00000004
/* fc service class */
#define FSF_CLASS_1 0x00000001
#define FSF_CLASS_2 0x00000002
#define FSF_CLASS_3 0x00000003
struct fsf_queue_designator;
struct fsf_status_read_buffer;
struct fsf_port_closed_payload;
struct fsf_bit_error_payload;
union fsf_prot_status_qual;
struct fsf_qual_version_error;
struct fsf_qual_sequence_error;
struct fsf_qtcb_prefix;
struct fsf_qtcb_header;
struct fsf_qtcb_bottom_config;
struct fsf_qtcb_bottom_support;
struct fsf_qtcb_bottom_io;
union fsf_qtcb_bottom;
typedef struct fsf_queue_designator {
u8 cssid;
u8 chpid;
u8 hla;
u8 ua;
u32 res1;
} __attribute__ ((packed)) fsf_queue_designator_t;
typedef struct fsf_port_closed_payload {
fsf_queue_designator_t queue_designator;
u32 port_handle;
} __attribute__ ((packed)) fsf_port_closed_payload_t;
typedef struct fsf_bit_error_payload {
u32 res1;
u32 link_failure_error_count;
u32 loss_of_sync_error_count;
u32 loss_of_signal_error_count;
u32 primitive_sequence_error_count;
u32 invalid_transmission_word_error_count;
u32 crc_error_count;
u32 primitive_sequence_event_timeout_count;
u32 elastic_buffer_overrun_error_count;
u32 fcal_arbitration_timeout_count;
u32 advertised_receive_b2b_credit;
u32 current_receive_b2b_credit;
u32 advertised_transmit_b2b_credit;
u32 current_transmit_b2b_credit;
} __attribute__ ((packed)) fsf_bit_error_payload_t;
typedef struct fsf_status_read_buffer {
u32 status_type;
u32 status_subtype;
u32 length;
u32 res1;
fsf_queue_designator_t queue_designator;
u32 d_id;
u32 class;
u64 fcp_lun;
u8 res3[24];
u8 payload[FSF_STATUS_READ_PAYLOAD_SIZE];
} __attribute__ ((packed)) fsf_status_read_buffer_t;
typedef struct fsf_qual_version_error {
u32 fsf_version;
u32 res1[3];
} __attribute__ ((packed)) fsf_qual_version_error_t;
typedef struct fsf_qual_sequence_error {
u32 exp_req_seq_no;
u32 res1[3];
} __attribute__ ((packed)) fsf_qual_sequence_error_t;
typedef struct fsf_qual_locallink_error {
u32 code;
u32 res1[3];
} __attribute__ ((packed)) fsf_qual_locallink_error_t;
typedef union fsf_prot_status_qual {
fsf_qual_version_error_t version_error;
fsf_qual_sequence_error_t sequence_error;
fsf_qual_locallink_error_t locallink_error;
} __attribute__ ((packed)) fsf_prot_status_qual_t;
typedef struct fsf_qtcb_prefix {
u64 req_id;
u32 qtcb_version;
u32 ulp_info;
u32 qtcb_type;
u32 req_seq_no;
u32 prot_status;
fsf_prot_status_qual_t prot_status_qual;
u8 res1[20];
} __attribute__ ((packed)) fsf_qtcb_prefix_t;
typedef struct fsf_qtcb_header {
u64 req_handle;
u32 fsf_command;
u32 res1;
u32 port_handle;
u32 lun_handle;
u32 res2;
u32 fsf_status;
u32 fsf_status_qual[4];
// fsf_status_qual_t fsf_status_qual; FIXME: define union
u8 res3[28];
u16 log_start;
u16 log_length;
u8 res4[16];
} __attribute__ ((packed)) fsf_qtcb_header_t;
typedef u64 fsf_wwn_t;
typedef struct fsf_nport_serv_param {
u8 common_serv_param[16];
fsf_wwn_t wwpn;
fsf_wwn_t wwnn;
u8 class1_serv_param[16];
u8 class2_serv_param[16];
u8 class3_serv_param[16];
u8 class4_serv_param[16];
u8 vendor_version_level[16];
u8 res1[16];
} __attribute__ ((packed)) fsf_nport_serv_param_t;
typedef struct fsf_plogi {
u32 code;
fsf_nport_serv_param_t serv_param;
} __attribute__ ((packed)) fsf_plogi_t;
#define FSF_FCP_CMND_SIZE 288
#define FSF_FCP_RSP_SIZE 128
typedef struct fsf_qtcb_bottom_io {
u32 data_direction;
u32 service_class;
u8 res1[8];
u32 fcp_cmnd_length;
u8 res2[12];
u8 fcp_cmnd[FSF_FCP_CMND_SIZE];
u8 fcp_rsp[FSF_FCP_RSP_SIZE];
u8 res3[64];
} __attribute__ ((packed)) fsf_qtcb_bottom_io_t;
typedef struct fsf_qtcb_bottom_support {
u8 res1[16];
u32 d_id;
u32 res2;
u64 fcp_lun;
u64 res3;
u64 req_handle;
u32 service_class;
u8 res4[3];
u8 timeout;
u8 res5[184];
u32 els1_length;
u32 els2_length;
u64 res6;
u8 els[256];
} __attribute__ ((packed)) fsf_qtcb_bottom_support_t;
typedef struct fsf_qtcb_bottom_config {
u32 lic_version;
u32 res1;
u32 high_qtcb_version;
u32 low_qtcb_version;
u32 max_qtcb_size;
u8 res2[12];
u32 fc_topology;
u32 fc_link_speed;
u32 adapter_type;
u32 peer_d_id;
u8 res3[12];
u32 s_id;
fsf_nport_serv_param_t nport_serv_param;
u8 res4[320];
} __attribute__ ((packed)) fsf_qtcb_bottom_config_t;
typedef union fsf_qtcb_bottom {
fsf_qtcb_bottom_io_t io;
fsf_qtcb_bottom_support_t support;
fsf_qtcb_bottom_config_t config;
} fsf_qtcb_bottom_t;
typedef struct fsf_qtcb {
fsf_qtcb_prefix_t prefix;
fsf_qtcb_header_t header;
fsf_qtcb_bottom_t bottom;
} __attribute__ ((packed)) fsf_qtcb_t;
#endif /* FSF_H */
......@@ -16,6 +16,7 @@
#include <linux/threads.h>
#include <asm/lowcore.h>
#include <linux/sched.h>
#include <linux/cache.h>
/* entry.S is sensitive to the offsets of these fields */
typedef struct {
......
/*
* File...........: linux/include/asm-s390x/idals.h
* Author(s)......: Holger Smolinski <Holger.Smolinski@de.ibm.com>
* Martin Schwidefsky <schwidefsky@de.ibm.com>
* Bugreports.to..: <Linux390@de.ibm.com>
* (C) IBM Corporation, IBM Deutschland Entwicklung GmbH, 2000a
* History of changes
* 07/24/00 new file
* 05/04/02 code restructuring.
*/
#ifndef _S390_IDALS_H
#define _S390_IDALS_H
#include <linux/config.h>
#include <linux/errno.h>
#include <asm/irq.h>
#define IDA_SIZE_LOG 12 /* 11 for 2k , 12 for 4k */
#define IDA_BLOCK_SIZE (1L<<IDA_SIZE_LOG)
/*
 * Test if an address/length pair needs an idal list.
 * Only relevant on 64 bit (CONFIG_ARCH_S390X): channel programs use
 * 31-bit data addresses, so any buffer reaching past the 2 GB line
 * needs an IDAL; on 31 bit the answer is always no.
 */
static inline int
idal_is_needed(void *vaddr, unsigned int length)
{
#if defined(CONFIG_ARCH_S390X)
	return ((__pa(vaddr) + length) >> 31) != 0;
#else
	return 0;
#endif
}
/*
 * Return the number of idal words needed for an address/length pair.
 * Zero means no IDAL is required at all (the buffer is directly
 * addressable by the channel subsystem).
 */
static inline unsigned int
idal_nr_words(void *vaddr, unsigned int length)
{
#if defined(CONFIG_ARCH_S390X)
	if (idal_is_needed(vaddr, length))
		return ((__pa(vaddr) & (IDA_BLOCK_SIZE-1)) + length +
			(IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
#endif
	return 0;
}
/*
 * Create the list of idal words for an address/length pair.
 * Writes the physical address of each IDA_BLOCK_SIZE block the
 * buffer touches into idaws[] (the first entry keeps the byte
 * offset within its block) and returns a pointer just past the
 * last word written.  On 31 bit this is a no-op and idaws is
 * returned unchanged.
 */
static inline unsigned long *
idal_create_words(unsigned long *idaws, void *vaddr, unsigned int length)
{
#if defined(CONFIG_ARCH_S390X)
	unsigned long paddr;
	unsigned int cidaw;

	paddr = __pa(vaddr);
	cidaw = ((paddr & (IDA_BLOCK_SIZE-1)) + length +
		 (IDA_BLOCK_SIZE-1)) >> IDA_SIZE_LOG;
	*idaws++ = paddr;
	/* Subsequent entries are the block-aligned addresses. */
	paddr &= -IDA_BLOCK_SIZE;
	while (--cidaw > 0) {
		paddr += IDA_BLOCK_SIZE;
		*idaws++ = paddr;
	}
#endif
	return idaws;
}
/*
* Function: set_normalized_cda
* sets the address of the data in CCW
* if necessary it allocates an IDAL and sets sthe appropriate flags
* Sets the address of the data in CCW.
* If necessary it allocates an IDAL and sets the appropriate flags.
*/
/*
 * Function: set_normalized_cda
 * Sets the address of the data in CCW.
 * If necessary it allocates an IDAL and sets the appropriate flags.
 * Returns 0 on success or -ENOMEM if the IDAL allocation fails.
 * The caller must later release any IDAL via clear_normalized_cda().
 */
static inline int
set_normalized_cda(ccw1_t * ccw, void *vaddr)
{
#if defined (CONFIG_ARCH_S390X)
	unsigned int nridaws;
	unsigned long *idal;

	/* A CCW must not already carry an IDAL. */
	if (ccw->flags & CCW_FLAG_IDA)
		BUG();
	nridaws = idal_nr_words(vaddr, ccw->count);
	if (nridaws > 0) {
		idal = kmalloc(nridaws * sizeof(unsigned long),
			       GFP_ATOMIC | GFP_DMA );
		if (idal == NULL)
			return -ENOMEM;
		idal_create_words(idal, vaddr, ccw->count);
		ccw->flags |= CCW_FLAG_IDA;
		/* The CCW now points at the IDAL instead of the data. */
		vaddr = idal;
	}
#endif
	ccw->cda = (__u32)(unsigned long) vaddr;
	return 0;
}
/*
* Function: clear_normalized_cda
* releases any allocated IDAL related to the CCW
* Releases any allocated IDAL related to the CCW.
*/
/*
 * Function: clear_normalized_cda
 * Releases any allocated IDAL related to the CCW and clears the
 * data address.
 */
static inline void
clear_normalized_cda(ccw1_t * ccw)
{
#if defined(CONFIG_ARCH_S390X)
	if (ccw->flags & CCW_FLAG_IDA) {
		kfree((void *)(unsigned long) ccw->cda);
		ccw->flags &= ~CCW_FLAG_IDA;
	}
#endif
	ccw->cda = 0;
}
#endif
/*
* include/asm-s390/init.h
* include/asm-s390x/init.h
*
* S390 version
* S390 version (s390x)
*/
#ifndef _S390_INIT_H
#define _S390_INIT_H
#define __init __attribute__ ((constructor))
/* don't know, if need on S390 */
#define __initdata
#define __initfunc(__arginit) \
__arginit __init; \
__arginit
/* For assembly routines
* need to define ?
*/
/*
#define __INIT .section ".text.init",#alloc,#execinstr
#define __FINIT .previous
#define __INITDATA .section ".data.init",#alloc,#write
*/
#define __cacheline_aligned __attribute__ ((__aligned__(256)))
#endif
#error "<asm/init.h> should never be used - use <linux/init.h> instead"
......@@ -40,6 +40,11 @@ extern inline void * phys_to_virt(unsigned long address)
return __io_virt(address);
}
/*
 * Change "struct page" to physical address.
 * The parameter is parenthesized so that expressions like
 * page_to_phys(p + 1) expand correctly.
 */
#define page_to_phys(page) (((page) - mem_map) << PAGE_SHIFT)
extern void * __ioremap(unsigned long offset, unsigned long size, unsigned long flags);
extern inline void * ioremap (unsigned long offset, unsigned long size)
......
......@@ -10,14 +10,12 @@
*/
#define __MAX_SUBCHANNELS 65536
#define NR_IRQS __MAX_SUBCHANNELS
#define NR_CHPIDS 256
#define LPM_ANYPATH 0xff /* doesn't really belong here, Ingo? */
#define INVALID_STORAGE_AREA ((void *)(-1 - 0x3FFF ))
extern int disable_irq(unsigned int);
extern int enable_irq(unsigned int);
/*
* path management control word
*/
......@@ -362,6 +360,92 @@ typedef struct {
/* extended part */
ciw_t ciw[MAX_CIWS]; /* variable # of CIWs */
} __attribute__ ((packed,aligned(4))) senseid_t;
/*
 * Where we put the ssd (store subchannel description) info for one
 * subchannel; filled in from the ssd_res response block below.
 */
typedef struct _ssd_info {
	__u8 valid:1;		/* non-zero: remaining fields are usable */
	__u8 type:7;		/* subchannel type */
	__u8 chpid[8];		/* chpids */
	__u16 fla[8];		/* full link addresses */
} __attribute__ ((packed)) ssd_info_t;
/*
 * Parameter area for the chsc instruction (see chsc() below); also used
 * as the area for store event information.  Holds either a
 * store-event-information (sei) or a store-subchannel-description (ssd)
 * request together with the matching response block.  Which union member
 * applies is presumably selected via command_code1/command_code2 --
 * confirm against the CHSC command documentation.
 * The layout is hardware-defined: do not reorder or repack fields.
 */
typedef struct chsc_area_t {
	/* request block: filled in by the caller before issuing chsc */
	struct {
		/* word 0 */
		__u16 command_code1;
		__u16 command_code2;
		union {
			struct {
				/* word 1 */
				__u32 reserved1;
				/* word 2 */
				__u32 reserved2;
			} __attribute__ ((packed,aligned(8))) sei_req;
			struct {
				/* word 1 */
				__u16 reserved1;
				__u16 f_sch;	/* first subchannel */
				/* word 2 */
				__u16 reserved2;
				__u16 l_sch;	/* last subchannel */
			} __attribute__ ((packed,aligned(8))) ssd_req;
		} request_block_data;
		/* word 3 */
		__u32 reserved3;
	} __attribute__ ((packed,aligned(8))) request_block;
	/* response block: written back when the chsc completes */
	struct {
		/* word 0 */
		__u16 length;
		__u16 response_code;
		/* word 1 */
		__u32 reserved1;
		union {
			struct {
				/* word 2 */
				__u8 flags;
				__u8 vf;	/* validity flags */
				__u8 rs;	/* reporting source */
				__u8 cc;	/* content code */
				/* word 3 */
				__u16 fla;	/* full link address */
				__u16 rsid;	/* reporting source id */
				/* word 4 */
				__u32 reserved2;
				/* word 5 */
				__u32 reserved3;
				/* word 6 */
				__u32 ccdf;	/* content-code dependent field */
				/* word 7 */
				__u32 reserved4;
				/* word 8 */
				__u32 reserved5;
				/* word 9 */
				__u32 reserved6;
			} __attribute__ ((packed,aligned(8))) sei_res;
			struct {
				/* word 2 */
				__u8 sch_valid : 1;
				__u8 dev_valid : 1;
				__u8 st : 3;	/* subchannel type */
				__u8 zeroes : 3;
				__u8 unit_addr;	/* unit address */
				__u16 devno;	/* device number */
				/* word 3 */
				__u8 path_mask;
				__u8 fla_valid_mask;
				__u16 sch;	/* subchannel */
				/* words 4-5 */
				__u8 chpid[8];	/* chpids 0-7 */
				/* words 6-9 */
				__u16 fla[8];	/* full link addresses 0-7 */
			} __attribute__ ((packed,aligned(8))) ssd_res;
		} response_block_data;
	} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE))) chsc_area_t;
#endif /* __KERNEL__ */
/*
......@@ -491,6 +575,7 @@ typedef struct {
/* ... for suspended CCWs */
#define DOIO_TIMEOUT 0x0080 /* 3 secs. timeout for sync. I/O */
#define DOIO_DONT_CALL_INTHDLR 0x0100 /* don't call interrupt handler */
#define DOIO_CANCEL_ON_TIMEOUT 0x0200 /* cancel I/O if it timed out */
/*
* do_IO()
......@@ -513,11 +598,6 @@ int do_IO( int irq, /* IRQ aka. subchannel number */
__u8 lpm, /* logical path mask */
unsigned long flag); /* flags : see above */
int start_IO( int irq, /* IRQ aka. subchannel number */
ccw1_t *cpa, /* logical channel program address */
unsigned long intparm, /* interruption parameter */
__u8 lpm, /* logical path mask */
unsigned int flag); /* flags : see above */
void do_crw_pending( void ); /* CRW handler */
......@@ -531,14 +611,6 @@ int clear_IO( int irq, /* IRQ aka. subchannel number */
unsigned long intparm, /* dummy intparm */
unsigned long flag); /* possible DOIO_WAIT_FOR_INTERRUPT */
int process_IRQ( struct pt_regs regs,
unsigned int irq,
unsigned int intparm);
int enable_cpu_sync_isc ( int irq );
int disable_cpu_sync_isc( int irq );
typedef struct {
int irq; /* irq, aka. subchannel */
__u16 devno; /* device number */
......@@ -546,8 +618,6 @@ typedef struct {
senseid_t sid_data; /* senseID data */
} s390_dev_info_t;
int get_dev_info( int irq, s390_dev_info_t *); /* to be eliminated - don't use */
int get_dev_info_by_irq ( int irq, s390_dev_info_t *pdi);
int get_dev_info_by_devno( __u16 devno, s390_dev_info_t *pdi);
......@@ -560,8 +630,6 @@ int get_irq_next ( int irq );
int read_dev_chars( int irq, void **buffer, int length );
int read_conf_data( int irq, void **buffer, int *length, __u8 lpm );
int s390_DevicePathVerification( int irq, __u8 domask );
int s390_request_irq_special( int irq,
io_handler_func_t io_handler,
not_oper_handler_func_t not_oper_handler,
......@@ -570,7 +638,6 @@ int s390_request_irq_special( int irq,
void *dev_id);
extern int set_cons_dev(int irq);
extern int reset_cons_dev(int irq);
extern int wait_cons_dev(int irq);
extern schib_t *s390_get_schib( int irq );
......@@ -630,11 +697,6 @@ extern __inline__ int msch_err(int irq, volatile schib_t *addr)
" .align 8\n"
" .quad 0b,2b\n"
".previous"
" lr 1,%1\n"
" msch 0(%2)\n"
"0: ipm %0\n"
" srl %0,28\n"
"1:\n"
#else
".section .fixup,\"ax\"\n"
"2: l %0,%3\n"
......@@ -743,6 +805,21 @@ extern __inline__ int hsch(int irq)
return ccode;
}
/*
 * Issue the cancel-subchannel instruction (XSCH, opcode 0xb276) for the
 * subchannel identified by irq.  Returns the instruction's condition
 * code (0-3).
 */
extern __inline__ int xsch(int irq)
{
	int ccode;

	__asm__ __volatile__(
		/* subchannel id: subchannel number with bit 0x10000 set, in gpr 1 */
		" lr 1,%1\n"
		/* NOTE(review): the rre register field is %1 although the
		 * subchannel id was just loaded into gpr 1; other helpers in
		 * this file clobber/use gpr 1 for this -- confirm encoding. */
		" .insn rre,0xb2760000,%1,0\n"
		" ipm %0\n"	/* insert program mask: cc -> bits 28-31 of %0 */
		" srl %0,28"	/* shift cc down into the low two bits */
		: "=d" (ccode)
		: "d" (irq | 0x10000L)
		: "cc", "1" );
	return ccode;
}
extern __inline__ int iac( void)
{
int ccode;
......@@ -805,6 +882,20 @@ extern __inline__ int diag210( diag210_t * addr)
: "cc" );
return ccode;
}
/*
 * Issue the channel-subsystem-call instruction (CHSC, opcode 0xb25f) on
 * the given parameter area.  The area carries both the request and the
 * response block (see chsc_area_t).  Returns the condition code (0-3).
 */
extern __inline__ int chsc( chsc_area_t * chsc_area)
{
	int cc;

	__asm__ __volatile__ (
		/* register operand is the address of the chsc area */
		".insn rre,0xb25f0000,%1,0 \n\t"
		"ipm %0 \n\t"	/* cc -> bits 28-31 of %0 */
		"srl %0,28 \n\t"	/* cc -> low two bits */
		: "=d" (cc)
		: "d" (chsc_area)
		: "cc" );
	return cc;
}
/*
* Various low-level irq details needed by irq.c, process.c,
......@@ -813,13 +904,6 @@ extern __inline__ int diag210( diag210_t * addr)
* Interrupt entry/exit code at both C and assembly level
*/
void mask_irq(unsigned int irq);
void unmask_irq(unsigned int irq);
#define MAX_IRQ_SOURCES 128
extern spinlock_t irq_controller_lock;
#ifdef CONFIG_SMP
#include <asm/atomic.h>
......@@ -849,17 +933,10 @@ static inline void irq_exit(int cpu, unsigned int irq)
#define __STR(x) #x
#define STR(x) __STR(x)
#ifdef CONFIG_SMP
/*
* SMP has a few special interrupts for IPI messages
*/
#endif /* CONFIG_SMP */
/*
* x86 profiling function, SMP safe. We might want to do this in
* assembly totally?
* is this ever used anyway?
*/
extern char _stext;
static inline void s390_do_profile (unsigned long addr)
......@@ -883,16 +960,19 @@ static inline void s390_do_profile (unsigned long addr)
#include <asm/s390io.h>
#define get_irq_lock(irq) &ioinfo[irq]->irq_lock
#define s390irq_spin_lock(irq) \
spin_lock(&(ioinfo[irq]->irq_lock))
spin_lock(get_irq_lock(irq))
#define s390irq_spin_unlock(irq) \
spin_unlock(&(ioinfo[irq]->irq_lock))
spin_unlock(get_irq_lock(irq))
#define s390irq_spin_lock_irqsave(irq,flags) \
spin_lock_irqsave(&(ioinfo[irq]->irq_lock), flags)
spin_lock_irqsave(get_irq_lock(irq), flags)
#define s390irq_spin_unlock_irqrestore(irq,flags) \
spin_unlock_irqrestore(&(ioinfo[irq]->irq_lock), flags)
spin_unlock_irqrestore(get_irq_lock(irq), flags)
#define touch_nmi_watchdog() do { } while(0)
......
......@@ -45,6 +45,8 @@
#define __LC_CPUADDR 0xD98
#define __LC_IPLDEV 0xDB8
#define __LC_JIFFY_TIMER 0xDC0
#define __LC_PANIC_MAGIC 0xE00
#define __LC_AREGS_SAVE_AREA 0x1340
......@@ -60,7 +62,7 @@
#define _SVC_PSW_MASK 0x0400000180000000
#define _MCCK_PSW_MASK 0x0400000180000000
#define _IO_PSW_MASK 0x0400000180000000
#define _USER_PSW_MASK 0x0701C00180000000
#define _USER_PSW_MASK 0x0705C00180000000
#define _WAIT_PSW_MASK 0x0706000180000000
#define _DW_PSW_MASK 0x0002000180000000
......@@ -158,7 +160,7 @@ struct _lowcore
/* entry.S sensitive area end */
/* SMP info area: defined by DJB */
__u64 jiffy_timer_cc; /* 0xdc0 */
__u64 jiffy_timer; /* 0xdc0 */
__u64 ext_call_fast; /* 0xdc8 */
__u8 pad12[0xe00-0xdd0]; /* 0xdd0 */
......@@ -198,12 +200,12 @@ extern __inline__ void set_prefix(__u32 address)
extern struct _lowcore *lowcore_ptr[];
#ifndef CONFIG_SMP
#define get_cpu_lowcore(cpu) S390_lowcore
#define safe_get_cpu_lowcore(cpu) S390_lowcore
#define get_cpu_lowcore(cpu) (&S390_lowcore)
#define safe_get_cpu_lowcore(cpu) (&S390_lowcore)
#else
#define get_cpu_lowcore(cpu) (*lowcore_ptr[cpu])
#define get_cpu_lowcore(cpu) (lowcore_ptr[(cpu)])
#define safe_get_cpu_lowcore(cpu) \
((cpu)==smp_processor_id() ? S390_lowcore:(*lowcore_ptr[(cpu)]))
((cpu) == smp_processor_id() ? &S390_lowcore : lowcore_ptr[(cpu)])
#endif
#endif /* __ASSEMBLY__ */
......
......@@ -12,6 +12,7 @@
#define PROT_READ 0x1 /* page can be read */
#define PROT_WRITE 0x2 /* page can be written */
#define PROT_EXEC 0x4 /* page can be executed */
#define PROT_SEM 0x8 /* page may be used for atomic ops */
#define PROT_NONE 0x0 /* page can not be accessed */
#define MAP_SHARED 0x01 /* Share changes */
......
......@@ -57,8 +57,8 @@ static inline void copy_page(void *to, void *from)
: "memory" );
}
#define clear_user_page(page, vaddr) clear_page(page)
#define copy_user_page(to, from, vaddr) copy_page(to, from)
#define clear_user_page(page, vaddr, pg) clear_page(page)
#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
#define BUG() do { \
printk("kernel BUG at %s:%d!\n", __FILE__, __LINE__); \
......@@ -113,9 +113,13 @@ typedef struct { unsigned long pgprot; } pgprot_t;
#define __PAGE_OFFSET 0x0UL
#define PAGE_OFFSET 0x0UL
#define __pa(x) (unsigned long)(x)
#define __va(x) (void *)(x)
#define virt_to_page(kaddr) (mem_map + (__pa(kaddr) >> PAGE_SHIFT))
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define __va(x) (void *)(unsigned long)(x)
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
......@@ -4,6 +4,7 @@
/* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code
* includes it even if CONFIG_PCI is not set.
*/
#define PCI_DMA_BUS_IS_PHYS (1)
#endif /* __ASM_S390_PCI_H */
#ifndef __ARCH_S390X_PERCPU__
#define __ARCH_S390X_PERCPU__
#include <asm-generic/percpu.h>
#endif /* __ARCH_S390X_PERCPU__ */
......@@ -17,10 +17,7 @@
#include <asm/processor.h>
#include <linux/threads.h>
#define pgd_quicklist (S390_lowcore.cpu_data.pgd_quick)
#define pmd_quicklist (S390_lowcore.cpu_data.pmd_quick)
#define pte_quicklist (S390_lowcore.cpu_data.pte_quick)
#define pgtable_cache_size (S390_lowcore.cpu_data.pgtable_cache_sz)
#define check_pgt_cache() do { } while (0)
/*
* Allocate and free page tables. The xxx_kernel() versions are
......@@ -28,58 +25,24 @@
* if any.
*/
/*
* page directory allocation/free routines.
*/
extern __inline__ pgd_t *get_pgd_slow (void)
static inline pgd_t *pgd_alloc (struct mm_struct *mm)
{
pgd_t *ret;
pgd_t *pgd;
int i;
ret = (pgd_t *) __get_free_pages(GFP_KERNEL, 2);
if (ret != NULL)
pgd = (pgd_t *) __get_free_pages(GFP_KERNEL,2);
if (pgd != NULL)
for (i = 0; i < PTRS_PER_PGD; i++)
pgd_clear(ret + i);
return ret;
}
extern __inline__ pgd_t *get_pgd_fast (void)
{
unsigned long *ret = pgd_quicklist;
if (ret != NULL) {
pgd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size -= 4;
}
return (pgd_t *) ret;
}
extern __inline__ pgd_t *pgd_alloc (struct mm_struct *mm)
{
pgd_t *pgd;
pgd = get_pgd_fast();
if (!pgd)
pgd = get_pgd_slow();
pgd_clear(pgd + i);
return pgd;
}
extern __inline__ void free_pgd_fast (pgd_t *pgd)
{
*(unsigned long *) pgd = (unsigned long) pgd_quicklist;
pgd_quicklist = (unsigned long *) pgd;
pgtable_cache_size += 4;
}
extern __inline__ void free_pgd_slow (pgd_t *pgd)
static inline void pgd_free (pgd_t *pgd)
{
free_pages((unsigned long) pgd, 2);
}
#define pgd_free(pgd) free_pgd_fast(pgd)
extern inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
{
pgd_val(*pgd) = _PGD_ENTRY | __pa(pmd);
}
......@@ -87,7 +50,7 @@ extern inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmd)
/*
* page middle directory allocation/free routines.
*/
extern inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
static inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
{
pmd_t *pmd;
int i;
......@@ -100,82 +63,67 @@ extern inline pmd_t * pmd_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
return pmd;
}
extern __inline__ pmd_t *
pmd_alloc_one_fast(struct mm_struct *mm, unsigned long address)
static inline void pmd_free (pmd_t *pmd)
{
unsigned long *ret = (unsigned long *) pmd_quicklist;
if (ret != NULL) {
pmd_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size -= 4;
}
return (pmd_t *) ret;
free_pages((unsigned long) pmd, 2);
}
extern __inline__ void pmd_free_fast (pmd_t *pmd)
{
*(unsigned long *) pmd = (unsigned long) pmd_quicklist;
pmd_quicklist = (unsigned long *) pmd;
pgtable_cache_size += 4;
}
#define pmd_free_tlb(tlb,pmd) pmd_free(pmd)
extern __inline__ void pmd_free_slow (pmd_t *pmd)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
{
free_pages((unsigned long) pmd, 2);
pmd_val(*pmd) = _PMD_ENTRY + __pa(pte);
pmd_val1(*pmd) = _PMD_ENTRY + __pa(pte+256);
}
#define pmd_free(pmd) pmd_free_fast(pmd)
extern inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pte_t *pte)
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *page)
{
pmd_val(*pmd) = _PMD_ENTRY | __pa(pte);
pmd_val1(*pmd) = _PMD_ENTRY | __pa(pte+256);
pmd_populate_kernel(mm, pmd, (pte_t *)((page-mem_map) << PAGE_SHIFT));
}
/*
* page table entry allocation/free routines.
*/
extern inline pte_t * pte_alloc_one(struct mm_struct *mm, unsigned long vmaddr)
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long vmaddr)
{
pte_t *pte;
int count;
int i;
count = 0;
do {
pte = (pte_t *) __get_free_page(GFP_KERNEL);
if (pte != NULL) {
for (i=0; i < PTRS_PER_PTE; i++)
pte_clear(pte+i);
} else {
current->state = TASK_UNINTERRUPTIBLE;
schedule_timeout(HZ);
}
} while (!pte && (count++ < 10));
return pte;
}
extern __inline__ pte_t* pte_alloc_one_fast(struct mm_struct *mm, unsigned long address)
static inline struct page *
pte_alloc_one(struct mm_struct *mm, unsigned long addr)
{
unsigned long *ret = (unsigned long *) pte_quicklist;
if (ret != NULL) {
pte_quicklist = (unsigned long *)(*ret);
ret[0] = ret[1];
pgtable_cache_size--;
}
return (pte_t *)ret;
return virt_to_page(pte_alloc_one_kernel(mm, addr));
}
extern __inline__ void pte_free_fast (pte_t *pte)
static inline void pte_free_kernel(pte_t *pte)
{
*(unsigned long *) pte = (unsigned long) pte_quicklist;
pte_quicklist = (unsigned long *) pte;
pgtable_cache_size++;
free_page((unsigned long) pte);
}
extern __inline__ void pte_free_slow (pte_t *pte)
static inline void pte_free(struct page *pte)
{
free_page((unsigned long) pte);
__free_page(pte);
}
#define pte_free(pte) pte_free_fast(pte)
extern int do_check_pgt_cache (int, int);
#define pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))
/*
* This establishes kernel virtual mappings (e.g., as a result of a
......@@ -184,142 +132,6 @@ extern int do_check_pgt_cache (int, int);
*/
#define set_pgdir(vmaddr, entry) do { } while(0)
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* called only from vmalloc/vfree
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
/*
* S/390 has three ways of flushing TLBs
* 'ptlb' does a flush of the local processor
* 'csp' flushes the TLBs on all PUs of a SMP
* 'ipte' invalidates a pte in a page table and flushes that out of
* the TLBs of all PUs of a SMP
*/
#define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
#ifndef CONFIG_SMP
/*
 * We always need to flush, since s390 does not flush the TLB
 * on each context switch.
 */
/* UP variant: flush the current mm's translations. */
static inline void flush_tlb(void)
{
	/* ptlb flushes the entire TLB of the local processor */
	local_flush_tlb();
}
/* UP variant: flush all translations; only the local CPU exists. */
static inline void flush_tlb_all(void)
{
	local_flush_tlb();
}
/* UP variant: the mm argument is ignored; the whole local TLB goes. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	local_flush_tlb();
}
/* UP variant: no single-page flush here; flush the whole local TLB. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
	local_flush_tlb();
}
/* UP variant: the range arguments are ignored; flush everything local. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
	local_flush_tlb();
}
#else
#include <asm/smp.h>
/*
 * Flush the TLBs on all CPUs of the SMP system using the csp
 * (compare-and-swap-and-purge) instruction.
 */
static inline void global_flush_tlb(void)
{
	long dummy = 0;

	__asm__ __volatile__ (
		/* Build the csp operand in gpr 4 from &dummy: round the
		 * address up to a 4-byte boundary, then add 1.
		 * NOTE(review): presumably the low bit selects the
		 * purge-all behaviour -- confirm against the csp
		 * instruction description. */
		" la 4,3(%0)\n"
		" nill 4,0xfffc\n"
		" la 4,1(4)\n"
		" slr 2,2\n"	/* compare value 0 */
		" slr 3,3\n"	/* swap value 0 */
		" csp 2,4"
		: : "a" (&dummy) : "cc", "2", "3", "4" );
}
/*
* We only have to do global flush of tlb if process run since last
* flush on any other pu than current.
* If we have threads (mm->count > 1) we always do a global flush,
* since the process runs on more than one processor at the same time.
*/
/*
 * Flush the TLB entries of one mm.  A cross-CPU flush (csp) is only
 * needed when the mm may have run on some other CPU since the last
 * flush: more than one CPU present AND either the mm is shared
 * (mm_count != 1) or its cpu_vm_mask is not just this CPU.  Otherwise
 * a local ptlb suffices.  On the global path cpu_vm_mask is reset to
 * the current CPU only.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
	unsigned long this_cpu_mask = 1UL << smp_processor_id();
	int need_global;

	need_global = (smp_num_cpus > 1) &&
		      (atomic_read(&mm->mm_count) != 1 ||
		       mm->cpu_vm_mask != this_cpu_mask);
	if (!need_global) {
		local_flush_tlb();
		return;
	}
	mm->cpu_vm_mask = this_cpu_mask;
	global_flush_tlb();
}
/* SMP variant: flush translations of the current process' mm. */
static inline void flush_tlb(void)
{
	__flush_tlb_mm(current->mm);
}
/* SMP variant: purge the TLBs of every CPU. */
static inline void flush_tlb_all(void)
{
	global_flush_tlb();
}
/* SMP variant: flush one mm, globally only if it ran elsewhere. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
	__flush_tlb_mm(mm);
}
/* SMP variant: no single-page flush; flush the whole owning mm. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
                                  unsigned long addr)
{
	__flush_tlb_mm(vma->vm_mm);
}
/* SMP variant: the range is ignored; flush the whole owning mm. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
                                   unsigned long start, unsigned long end)
{
	__flush_tlb_mm(vma->vm_mm);
}
#endif
/* Intentionally a no-op on this architecture. */
extern inline void flush_tlb_pgtables(struct mm_struct *mm,
                                      unsigned long start, unsigned long end)
{
	/* S/390 does not keep any page table caches in TLB */
}
/* Test-and-clear the referenced state; no TLB flush required. */
static inline int ptep_test_and_clear_and_flush_young(struct vm_area_struct *vma,
                                       unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB; the bits live in the storage key */
	return ptep_test_and_clear_young(ptep);
}
/* Test-and-clear the dirty state; no TLB flush required. */
static inline int ptep_test_and_clear_and_flush_dirty(struct vm_area_struct *vma,
                                       unsigned long address, pte_t *ptep)
{
	/* No need to flush TLB; the bits live in the storage key */
	return ptep_test_and_clear_dirty(ptep);
}
static inline pte_t ptep_invalidate(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
......
......@@ -29,17 +29,6 @@
extern pgd_t swapper_pg_dir[] __attribute__ ((aligned (4096)));
extern void paging_init(void);
/* Caches aren't brain-dead on S390. */
#define flush_cache_all() do { } while (0)
#define flush_cache_mm(mm) do { } while (0)
#define flush_cache_range(vma, start, end) do { } while (0)
#define flush_cache_page(vma, vmaddr) do { } while (0)
#define flush_page_to_ram(page) do { } while (0)
#define flush_dcache_page(page) do { } while (0)
#define flush_icache_range(start, end) do { } while (0)
#define flush_icache_page(vma,pg) do { } while (0)
#define flush_icache_user_range(vma,pg,adr,len) do { } while (0)
/*
* The S390 doesn't have any external MMU info: the kernel page
* tables contain all the necessary information.
......@@ -158,7 +147,8 @@ extern char empty_zero_page[PAGE_SIZE];
/* Bits in the page table entry */
#define _PAGE_PRESENT 0x001 /* Software */
#define _PAGE_MKCLEAR 0x002 /* Software */
#define _PAGE_MKCLEAN 0x002 /* Software */
#define _PAGE_ISCLEAN 0x004 /* Software */
#define _PAGE_RO 0x200 /* HW read-only */
#define _PAGE_INVALID 0x400 /* HW invalid */
......@@ -186,32 +176,34 @@ extern char empty_zero_page[PAGE_SIZE];
* No mapping available
*/
#define PAGE_INVALID __pgprot(_PAGE_INVALID)
#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_INVALID)
#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_RO)
#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_RO)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT )
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT )
#define PAGE_NONE_SHARED __pgprot(_PAGE_PRESENT|_PAGE_INVALID)
#define PAGE_NONE_PRIVATE __pgprot(_PAGE_PRESENT|_PAGE_INVALID|_PAGE_ISCLEAN)
#define PAGE_RO_SHARED __pgprot(_PAGE_PRESENT|_PAGE_RO)
#define PAGE_RO_PRIVATE __pgprot(_PAGE_PRESENT|_PAGE_RO|_PAGE_ISCLEAN)
#define PAGE_COPY __pgprot(_PAGE_PRESENT|_PAGE_RO|_PAGE_ISCLEAN)
#define PAGE_SHARED __pgprot(_PAGE_PRESENT)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT)
/*
* The S390 can't do page protection for execute, and considers that the
 * same as read. Also, write permissions imply read permissions. This is
* the closest we can get..
*/
#define __P000 PAGE_NONE
#define __P001 PAGE_READONLY
#define __P000 PAGE_NONE_PRIVATE
#define __P001 PAGE_RO_PRIVATE
#define __P010 PAGE_COPY
#define __P011 PAGE_COPY
#define __P100 PAGE_READONLY
#define __P101 PAGE_READONLY
#define __P100 PAGE_RO_PRIVATE
#define __P101 PAGE_RO_PRIVATE
#define __P110 PAGE_COPY
#define __P111 PAGE_COPY
#define __S000 PAGE_NONE
#define __S001 PAGE_READONLY
#define __S000 PAGE_NONE_SHARED
#define __S001 PAGE_RO_SHARED
#define __S010 PAGE_SHARED
#define __S011 PAGE_SHARED
#define __S100 PAGE_READONLY
#define __S101 PAGE_READONLY
#define __S100 PAGE_RO_SHARED
#define __S101 PAGE_RO_SHARED
#define __S110 PAGE_SHARED
#define __S111 PAGE_SHARED
......@@ -222,10 +214,10 @@ extern char empty_zero_page[PAGE_SIZE];
*/
extern inline void set_pte(pte_t *pteptr, pte_t pteval)
{
if ((pte_val(pteval) & (_PAGE_MKCLEAR|_PAGE_INVALID))
== _PAGE_MKCLEAR)
if ((pte_val(pteval) & (_PAGE_MKCLEAN|_PAGE_INVALID))
== _PAGE_MKCLEAN)
{
pte_val(pteval) &= ~_PAGE_MKCLEAR;
pte_val(pteval) &= ~_PAGE_MKCLEAN;
asm volatile ("sske %0,%1"
: : "d" (0), "a" (pte_val(pteval)));
......@@ -234,8 +226,6 @@ extern inline void set_pte(pte_t *pteptr, pte_t pteval)
*pteptr = pteval;
}
#define pages_to_mb(x) ((x) >> (20-PAGE_SHIFT))
/*
* pgd/pmd/pte query functions
*/
......@@ -295,6 +285,8 @@ extern inline int pte_dirty(pte_t pte)
{
int skey;
if (pte_val(pte) & _PAGE_ISCLEAN)
return 0;
asm volatile ("iske %0,%1" : "=d" (skey) : "a" (pte_val(pte)));
return skey & _PAGE_CHANGED;
}
......@@ -326,15 +318,14 @@ extern inline void pte_clear(pte_t *ptep)
pte_val(*ptep) = _PAGE_INVALID;
}
#define PTE_INIT(x) pte_clear(x)
/*
* The following pte_modification functions only work if
* pte_present() is true. Undefined behaviour if not..
*/
extern inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte_val(pte) = (pte_val(pte) & PAGE_MASK) | pgprot_val(newprot);
pte_val(pte) &= PAGE_MASK | _PAGE_ISCLEAN;
pte_val(pte) |= pgprot_val(newprot) & ~_PAGE_ISCLEAN;
return pte;
}
......@@ -361,13 +352,11 @@ extern inline pte_t pte_mkclean(pte_t pte)
extern inline pte_t pte_mkdirty(pte_t pte)
{
/* We can't set the changed bit atomically either. For now we
* set (!) the page referenced bit. */
asm volatile ("sske %0,%1"
: : "d" (_PAGE_CHANGED|_PAGE_REFERENCED),
"a" (pte_val(pte)));
pte_val(pte) &= ~_PAGE_MKCLEAR;
/* We do not explicitly set the dirty bit because the
* sske instruction is slow. It is faster to let the
* next instruction set the dirty bit.
*/
pte_val(pte) &= ~(_PAGE_MKCLEAN | _PAGE_ISCLEAN);
return pte;
}
......@@ -401,6 +390,8 @@ static inline int ptep_test_and_clear_dirty(pte_t *ptep)
{
int skey;
if (pte_val(*ptep) & _PAGE_ISCLEAN)
return 0;
asm volatile ("iske %0,%1" : "=d" (skey) : "a" (*ptep));
if ((skey & _PAGE_CHANGED) == 0)
return 0;
......@@ -443,42 +434,70 @@ extern inline pte_t mk_pte_phys(unsigned long physpage, pgprot_t pgprot)
#define mk_pte(pg, pgprot) \
({ \
struct page *__page = (pg); \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((__page-mem_map) << PAGE_SHIFT); \
pte_t __pte = mk_pte_phys(__physpage, (pgprot)); \
pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
\
if (__page != ZERO_PAGE(__physpage)) { \
int __users = page_count(__page); \
__users -= !!PagePrivate(page) + !!__page->mapping; \
\
if (__users == 1) \
pte_val(__pte) |= _PAGE_MKCLEAR; \
if (!(pgprot_val(__pgprot) & _PAGE_ISCLEAN)) { \
int __users = !!PagePrivate(__page) + !!__page->mapping; \
if (__users + page_count(__page) == 1) \
pte_val(__pte) |= _PAGE_MKCLEAN; \
} \
__pte; \
})
#define pfn_pte(pfn, pgprot) \
({ \
struct page *__page = mem_map+(pfn); \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
pte_t __pte = mk_pte_phys(__physpage, __pgprot); \
\
if (!(pgprot_val(__pgprot) & _PAGE_ISCLEAN)) { \
int __users = !!PagePrivate(__page) + !!__page->mapping; \
if (__users + page_count(__page) == 1) \
pte_val(__pte) |= _PAGE_MKCLEAN; \
} \
__pte; \
})
#define pte_page(x) (mem_map+(unsigned long)((pte_val(x) >> PAGE_SHIFT)))
#define pfn_pmd(pfn, pgprot) \
({ \
pgprot_t __pgprot = (pgprot); \
unsigned long __physpage = __pa((pfn) << PAGE_SHIFT); \
pmd_t __pmd = __pmd(__physpage + pgprot_val(__pgprot)); \
__pmd; \
})
#define pte_pfn(x) (pte_val(x) >> PAGE_SHIFT)
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_page_kernel(pmd) (pmd_val(pmd) & PAGE_MASK)
#define pmd_page(pmd) \
((unsigned long) __va(pmd_val(pmd) & PAGE_MASK))
#define pmd_page(pmd) (mem_map+(pmd_val(pmd) >> PAGE_SHIFT))
#define pgd_page_kernel(pgd) (pgd_val(pgd) & PAGE_MASK)
/* to find an entry in a page-table-directory */
#define pgd_index(address) ((address >> PGDIR_SHIFT) & (PTRS_PER_PGD-1))
#define pgd_offset(mm, address) ((mm)->pgd+pgd_index(address))
#define pgd_page(pmd) \
((unsigned long) __va(pgd_val(pmd) & PAGE_MASK))
#define pgd_offset(mm, address) ((mm)->pgd + pgd_index(address))
/* to find an entry in a kernel page-table-directory */
#define pgd_offset_k(address) pgd_offset(&init_mm, address)
/* Find an entry in the second-level page table.. */
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
#define pmd_offset(dir,addr) \
((pmd_t *) pgd_page(*(dir)) + (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)))
((pmd_t *) pgd_page_kernel(*(dir)) + __pmd_offset(addr))
/* Find an entry in the third-level page table.. */
#define pte_offset(dir,addr) \
((pte_t *) pmd_page(*(dir)) + (((addr) >> PAGE_SHIFT) & (PTRS_PER_PTE - 1)))
#define __pte_offset(address) (((address) >> PAGE_SHIFT) & (PTRS_PER_PTE-1))
#define pte_offset_kernel(pmd, address) \
((pte_t *) pmd_page_kernel(*(pmd)) + __pte_offset(address))
#define pte_offset_map(pmd, address) pte_offset_kernel(pmd, address)
#define pte_offset_map_nested(pmd, address) pte_offset_kernel(pmd, address)
#define pte_unmap(pte) do { } while (0)
#define pte_unmap_nested(pte) do { } while (0)
/*
* A page-table entry has some bits we have to treat in a special way.
......
......@@ -16,6 +16,7 @@
#include <asm/page.h>
#include <asm/ptrace.h>
#ifdef __KERNEL__
/*
* Default implementation of macro that returns current
* instruction pointer ("program counter").
......@@ -59,7 +60,7 @@ extern struct task_struct *last_task_used_math;
/*
* User space process size: 4TB (default).
*/
#define TASK_SIZE (0x40000000000UL)
#define TASK_SIZE (0x20000000000UL)
#define TASK31_SIZE (0x80000000UL)
/* This decides where the kernel will search for a free chunk of vm
......@@ -80,7 +81,6 @@ typedef struct {
struct thread_struct
{
struct pt_regs *regs; /* the user registers can be found on*/
s390_fp_regs fp_regs;
__u32 ar2; /* kernel access register 2 */
__u32 ar4; /* kernel access register 4 */
......@@ -99,8 +99,7 @@ struct thread_struct
typedef struct thread_struct thread_struct;
#define INIT_THREAD { (struct pt_regs *) 0, \
{ 0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
#define INIT_THREAD {{0,{{0},{0},{0},{0},{0},{0},{0},{0},{0},{0}, \
{0},{0},{0},{0},{0},{0}}}, \
0, 0, \
sizeof(init_stack) + (addr_t) &init_stack, \
......@@ -125,6 +124,7 @@ typedef struct thread_struct thread_struct;
/* Forward declaration, a strange C thing */
struct task_struct;
struct mm_struct;
/* Free all resources held by a thread. */
......@@ -136,28 +136,20 @@ extern int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags);
#define release_segments(mm) do { } while (0)
/*
* Return saved PC of a blocked thread. used in kernel/sched
* Return saved PC of a blocked thread.
*/
extern inline unsigned long thread_saved_pc(struct thread_struct *t)
{
return (t->regs) ? ((unsigned long)t->regs->psw.addr) : 0;
}
extern inline unsigned long thread_saved_pc(struct task_struct *t);
unsigned long get_wchan(struct task_struct *p);
#define KSTK_EIP(tsk) ((tsk)->thread.regs->psw.addr)
#define KSTK_ESP(tsk) ((tsk)->thread.ksp)
/* Allocation and freeing of basic task resources. */
/*
* NOTE! The task struct and the stack go together
* Print register of task into buffer. Used in fs/proc/array.c.
*/
#define alloc_task_struct() \
((struct task_struct *) __get_free_pages(GFP_KERNEL,2))
#define free_task_struct(p) free_pages((unsigned long)(p),2)
#define get_task_struct(tsk) atomic_inc(&virt_to_page(tsk)->count)
extern char *task_show_regs(struct task_struct *task, char *buffer);
#define init_task (init_task_union.task)
#define init_stack (init_task_union.stack)
unsigned long get_wchan(struct task_struct *p);
#define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
(((addr_t) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)) & -8L))
#define KSTK_EIP(tsk) (__KSTK_PTREGS(tsk)->psw.addr)
#define KSTK_ESP(tsk) (__KSTK_PTREGS(tsk)->gprs[15])
#define cpu_relax() do { } while (0)
......@@ -174,6 +166,43 @@ unsigned long get_wchan(struct task_struct *p);
#define USER_STD_MASK 0x0000000000000080UL
#define PSW_PROBLEM_STATE 0x0001000000000000UL
/*
 * Set PSW mask to specified value, while leaving the
 * PSW addr pointing to the next instruction.
 */
static inline void __load_psw_mask (unsigned long mask)
{
	unsigned long addr;
	psw_t psw;
	psw.mask = mask;

	asm volatile (
		" larl %0,1f\n"		/* address of label 1 */
		" stg %0,8(%1)\n"	/* store into psw.addr (offset 8) */
		" lpswe 0(%1)\n"	/* load the new PSW */
		"1:"			/* execution resumes here */
		: "=&d" (addr) : "a" (&psw) : "memory", "cc" );
}
/*
 * Function to stop a processor until an interruption occurred:
 * loads an enabled wait PSW whose continuation address is label 0,
 * so the CPU resumes there after the interruption.
 */
static inline void enabled_wait(void)
{
	unsigned long reg;
	psw_t wait_psw;

	wait_psw.mask = 0x0706000180000000;	/* same value as _WAIT_PSW_MASK */
	asm volatile (
		" larl %0,0f\n"		/* address of label 0 */
		" stg %0,8(%1)\n"	/* store into wait_psw.addr (offset 8) */
		" lpswe 0(%1)\n"	/* enter the wait PSW */
		"0:"
		: "=&a" (reg) : "a" (&wait_psw) : "memory", "cc" );
}
/*
* Function to drop a processor into disabled wait state
*/
......@@ -223,5 +252,7 @@ static inline void disabled_wait(addr_t code)
: : "a" (dw_psw), "a" (&ctl_buf) : "cc", "0", "1");
}
#endif
#endif /* __ASM_S390_PROCESSOR_H */
......@@ -85,12 +85,16 @@
#define STACK_FRAME_OVERHEAD 160 /* size of minimum stack frame */
#define PTRACE_SETOPTIONS 21
/* options set using PTRACE_SETOPTIONS */
#define PTRACE_O_TRACESYSGOOD 0x00000001
#ifndef __ASSEMBLY__
#include <linux/config.h>
#include <linux/stddef.h>
#include <linux/types.h>
#include <asm/current.h>
#include <asm/setup.h>
/* this typedef defines how a Program Status Word looks like */
......@@ -98,7 +102,7 @@ typedef struct
{
__u64 mask;
__u64 addr;
} psw_t __attribute__ ((aligned(8)));
} __attribute__ ((aligned(8))) psw_t;
#ifdef __KERNEL__
#define FIX_PSW(addr) ((unsigned long)(addr))
......@@ -130,8 +134,8 @@ typedef struct
#define FPC_VALID_MASK 0xF8F8FF03
/*
* The first entries in pt_regs, gdb_pt_regs and user_regs_struct
* are common for all three structures. The s390_regs structure
* The first entries in pt_regs and user_regs_struct
* are common for the two structures. The s390_regs structure
* covers the common parts. It simplifies copying the common part
* between the three structures.
*/
......@@ -154,33 +158,15 @@ struct pt_regs
__u32 acrs[NUM_ACRS];
__u64 orig_gpr2;
__u32 trap;
__u32 old_ilc;
} __attribute__ ((packed));
/*
* The gdb_pt_regs struct is used instead of the pt_regs structure
* if kernel remote debugging is used.
*/
#if CONFIG_REMOTE_DEBUG
struct gdb_pt_regs
{
psw_t psw;
__u64 gprs[NUM_GPRS];
__u32 acrs[NUM_ACRS];
__u64 orig_gpr2;
__u32 trap;
__u32 crs[16];
s390_fp_regs fp_regs;
};
#endif
/*
* Now for the program event recording (trace) definitions.
*/
typedef struct
{
__u64 cr[3];
} per_cr_words __attribute__((packed));
} per_cr_words;
#define PER_EM_MASK 0x00000000E8000000UL
......@@ -203,14 +189,14 @@ typedef struct
unsigned : 21;
addr_t starting_addr;
addr_t ending_addr;
} per_cr_bits __attribute__((packed));
} per_cr_bits;
typedef struct
{
__u16 perc_atmid;
addr_t address;
__u8 access_id;
} per_lowcore_words __attribute__((packed));
} per_lowcore_words;
typedef struct
{
......@@ -230,14 +216,14 @@ typedef struct
addr_t address; /* 0x098 */
unsigned : 4; /* 0x0a1 */
unsigned access_id : 4;
} per_lowcore_bits __attribute__((packed));
} per_lowcore_bits;
typedef struct
{
union {
per_cr_words words;
per_cr_bits bits;
} control_regs __attribute__((packed));
} control_regs;
/*
* Use these flags instead of setting em_instruction_fetch
* directly they are used so that single stepping can be
......@@ -256,7 +242,7 @@ typedef struct
per_lowcore_words words;
per_lowcore_bits bits;
} lowcore;
} per_struct __attribute__((packed));
} per_struct;
typedef struct
{
......@@ -326,7 +312,6 @@ struct user_regs_struct
#define user_mode(regs) (((regs)->psw.mask & PSW_PROBLEM_STATE) != 0)
#define instruction_pointer(regs) ((regs)->psw.addr)
extern void show_regs(struct pt_regs * regs);
extern char *task_show_regs(struct task_struct *task, char *buffer);
#endif
#endif /* __ASSEMBLY__ */
......
/*
* linux/include/asm/qdio.h
*
* Linux for S/390 QDIO base support, Hipersocket base support
* version 2
*
* Copyright 2000,2002 IBM Corporation
* Author(s): Utz Bacher <utz.bacher@de.ibm.com>
*
*/
#ifndef __QDIO_H__
#define __QDIO_H__
#define VERSION_QDIO_H "$Revision: 1.48 $"
/* Note: most of the typedefs are from Ingo. */
#include <linux/interrupt.h>
//#define QDIO_DBF_LIKE_HELL
#define QDIO_NAME "qdio "
#define QDIO_VERBOSE_LEVEL 9
#ifndef CONFIG_ARCH_S390X
#define QDIO_32_BIT
#endif /* CONFIG_ARCH_S390X */
#define QDIO_USE_PROCESSING_STATE
#ifdef CONFIG_QDIO_PERF_STATS
#define QDIO_PERFORMANCE_STATS
#endif /* CONFIG_QDIO_PERF_STATS */
#define QDIO_MINIMAL_BH_RELIEF_TIME 16
#define QDIO_TIMER_POLL_VALUE 1
#define IQDIO_TIMER_POLL_VALUE 1
/**** CONSTANTS, that are relied on without using these symbols *****/
#define QDIO_MAX_QUEUES_PER_IRQ 32 /* used in width of unsigned int */
/************************ END of CONSTANTS **************************/
#define QDIO_MAX_BUFFERS_PER_Q 128 /* must be a power of 2 (%x=&(x-1)*/
#define QDIO_BUF_ORDER 7 /* 2**this == number of pages used for sbals in 1 q */
#define QDIO_MAX_ELEMENTS_PER_BUFFER 16
#define SBAL_SIZE 256
#define IQDIO_FILL_LEVEL_TO_POLL (QDIO_MAX_BUFFERS_PER_Q*4/3)
#define IQDIO_THININT_ISC 3
#define IQDIO_DELAY_TARGET 0
#define QDIO_BUSY_BIT_PATIENCE 2000 /* in microsecs */
#define IQDIO_GLOBAL_LAPS 2 /* GLOBAL_LAPS are not used as we */
#define IQDIO_GLOBAL_LAPS_INT 1 /* dont global summary */
#define IQDIO_LOCAL_LAPS 4
#define IQDIO_LOCAL_LAPS_INT 1
#define IQDIO_GLOBAL_SUMMARY_CC_MASK 2
/*#define IQDIO_IQDC_INT_PARM 0x1234*/
#define QDIO_Q_LAPS 5
#define QDIO_STORAGE_KEY 0
#define L2_CACHELINE_SIZE 256
#define INDICATORS_PER_CACHELINE (L2_CACHELINE_SIZE/sizeof(__u32))
#define QDIO_PERF "qdio_perf"
/* must be a power of 2 */
/*#define QDIO_STATS_NUMBER 4
#define QDIO_STATS_CLASSES 2
#define QDIO_STATS_COUNT_NEEDED 2*/
#define QDIO_NO_USE_COUNT_TIME 10
#define QDIO_NO_USE_COUNT_TIMEOUT 1000 /* wait for 1 sec on each q before
exiting without having use_count
of the queue to 0 */
#define QDIO_ESTABLISH_TIMEOUT 1000
#define QDIO_ACTIVATE_TIMEOUT 100
#define QDIO_CLEANUP_CLEAR_TIMEOUT 20000
#define QDIO_CLEANUP_HALT_TIMEOUT 10000
#define QDIO_BH AURORA_BH
#define QDIO_IRQ_BUCKETS 256 /* heavy..., but does only use a few bytes, but
be rather faster in cases of collisions
(if there really is a collision, it is
on every (traditional) interrupt and every
do_QDIO, so we rather are generous */
#define QDIO_QETH_QFMT 0
#define QDIO_ZFCP_QFMT 1
#define QDIO_IQDIO_QFMT 2
#define QDIO_IRQ_STATE_FRESH 0 /* must be 0 -> memset has set it to 0 */
#define QDIO_IRQ_STATE_INACTIVE 1
#define QDIO_IRQ_STATE_ESTABLISHED 2
#define QDIO_IRQ_STATE_ACTIVE 3
#define QDIO_IRQ_STATE_STOPPED 4
/* used as intparm in do_IO: */
#define QDIO_DOING_SENSEID 0
#define QDIO_DOING_ESTABLISH 1
#define QDIO_DOING_ACTIVATE 2
#define QDIO_DOING_CLEANUP 3
/************************* DEBUG FACILITY STUFF *********************/
/* #define QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_HEX(ex,name,level,addr,len) \
do { \
if (ex) \
debug_exception(qdio_dbf_##name,level,(void*)(addr),len); \
else \
debug_event(qdio_dbf_##name,level,(void*)(addr),len); \
} while (0)
#define QDIO_DBF_TEXT(ex,name,level,text) \
do { \
if (ex) \
debug_text_exception(qdio_dbf_##name,level,text); \
else \
debug_text_event(qdio_dbf_##name,level,text); \
} while (0)
#define QDIO_DBF_HEX0(ex,name,addr,len) QDIO_DBF_HEX(ex,name,0,addr,len)
#define QDIO_DBF_HEX1(ex,name,addr,len) QDIO_DBF_HEX(ex,name,1,addr,len)
#define QDIO_DBF_HEX2(ex,name,addr,len) QDIO_DBF_HEX(ex,name,2,addr,len)
#define QDIO_DBF_HEX3(ex,name,addr,len) QDIO_DBF_HEX(ex,name,3,addr,len)
#define QDIO_DBF_HEX4(ex,name,addr,len) QDIO_DBF_HEX(ex,name,4,addr,len)
#define QDIO_DBF_HEX5(ex,name,addr,len) QDIO_DBF_HEX(ex,name,5,addr,len)
#define QDIO_DBF_HEX6(ex,name,addr,len) QDIO_DBF_HEX(ex,name,6,addr,len)
#ifdef QDIO_DBF_LIKE_HELL
#endif /* QDIO_DBF_LIKE_HELL */
#if 0
#define QDIO_DBF_HEX0(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX1(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX2(ex,name,addr,len) do {} while (0)
#ifndef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_HEX3(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX4(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX5(ex,name,addr,len) do {} while (0)
#define QDIO_DBF_HEX6(ex,name,addr,len) do {} while (0)
#endif /* QDIO_DBF_LIKE_HELL */
#endif /* 0 */
#define QDIO_DBF_TEXT0(ex,name,text) QDIO_DBF_TEXT(ex,name,0,text)
#define QDIO_DBF_TEXT1(ex,name,text) QDIO_DBF_TEXT(ex,name,1,text)
#define QDIO_DBF_TEXT2(ex,name,text) QDIO_DBF_TEXT(ex,name,2,text)
#define QDIO_DBF_TEXT3(ex,name,text) QDIO_DBF_TEXT(ex,name,3,text)
#define QDIO_DBF_TEXT4(ex,name,text) QDIO_DBF_TEXT(ex,name,4,text)
#define QDIO_DBF_TEXT5(ex,name,text) QDIO_DBF_TEXT(ex,name,5,text)
#define QDIO_DBF_TEXT6(ex,name,text) QDIO_DBF_TEXT(ex,name,6,text)
#ifdef QDIO_DBF_LIKE_HELL
#endif /* QDIO_DBF_LIKE_HELL */
#if 0
#define QDIO_DBF_TEXT0(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT1(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT2(ex,name,text) do {} while (0)
#ifndef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_TEXT3(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT4(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT5(ex,name,text) do {} while (0)
#define QDIO_DBF_TEXT6(ex,name,text) do {} while (0)
#endif /* QDIO_DBF_LIKE_HELL */
#endif /* 0 */
#define QDIO_DBF_SETUP_NAME "qdio_setup"
#define QDIO_DBF_SETUP_LEN 8
#define QDIO_DBF_SETUP_INDEX 2
#define QDIO_DBF_SETUP_NR_AREAS 1
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SETUP_LEVEL 6
#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SETUP_LEVEL 2
#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SBAL_NAME "qdio_labs" /* sbal */
#define QDIO_DBF_SBAL_LEN 256
#define QDIO_DBF_SBAL_INDEX 2
#define QDIO_DBF_SBAL_NR_AREAS 2
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SBAL_LEVEL 6
#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SBAL_LEVEL 2
#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TRACE_NAME "qdio_trace"
#define QDIO_DBF_TRACE_LEN 8
#define QDIO_DBF_TRACE_NR_AREAS 2
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_TRACE_INDEX 4
#define QDIO_DBF_TRACE_LEVEL 4 /* -------- could be even more verbose here */
#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_TRACE_INDEX 2
#define QDIO_DBF_TRACE_LEVEL 2
#endif /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SENSE_NAME "qdio_sense"
#define QDIO_DBF_SENSE_LEN 64
#define QDIO_DBF_SENSE_INDEX 1
#define QDIO_DBF_SENSE_NR_AREAS 1
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_DBF_SENSE_LEVEL 6
#else /* QDIO_DBF_LIKE_HELL */
#define QDIO_DBF_SENSE_LEVEL 2
#endif /* QDIO_DBF_LIKE_HELL */
#ifdef QDIO_DBF_LIKE_HELL
#define QDIO_TRACE_QTYPE QDIO_ZFCP_QFMT
#define QDIO_DBF_SLSB_OUT_NAME "qdio_slsb_out"
#define QDIO_DBF_SLSB_OUT_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_OUT_INDEX 8
#define QDIO_DBF_SLSB_OUT_NR_AREAS 1
#define QDIO_DBF_SLSB_OUT_LEVEL 6
#define QDIO_DBF_SLSB_IN_NAME "qdio_slsb_in"
#define QDIO_DBF_SLSB_IN_LEN QDIO_MAX_BUFFERS_PER_Q
#define QDIO_DBF_SLSB_IN_INDEX 8
#define QDIO_DBF_SLSB_IN_NR_AREAS 1
#define QDIO_DBF_SLSB_IN_LEVEL 6
#endif /* QDIO_DBF_LIKE_HELL */
/****************** END OF DEBUG FACILITY STUFF *********************/
/*
 * One scatter/gather element of a QDIO buffer.  Mirrors the hardware
 * SBAL-element layout (see sbal_element_t below): on 31-bit builds a
 * pad word keeps 'addr' in the 64-bit slot the hardware expects.
 */
typedef struct qdio_buffer_element_t {
unsigned int flags;	/* presumably SBAL_FLAGS* bits -- confirm against sbal_flags_t */
unsigned int length;	/* byte count of the data fragment */
#ifdef QDIO_32_BIT
void *reserved;		/* pad so addr occupies the full 64-bit field */
#endif /* QDIO_32_BIT */
void *addr;		/* data fragment address */
} __attribute__ ((packed,aligned(16))) qdio_buffer_element_t;
/* A QDIO buffer: 16 elements, packed and 256-byte aligned like an SBAL. */
typedef struct qdio_buffer_t {
volatile qdio_buffer_element_t element[16];
} __attribute__ ((packed,aligned(256))) qdio_buffer_t;
/*
 * Callback invoked by the qdio layer on queue events.
 * Parameters, in order: irq, status (QDIO_STATUS_*), qdio_error,
 * siga_error, queue_number, index of first element processed,
 * number of elements processed, int_parm (opaque value supplied
 * in qdio_initialize_t).
 */
typedef void qdio_handler_t(int,unsigned int,unsigned int,unsigned int,
unsigned int,int,int,unsigned long);
#define QDIO_STATUS_INBOUND_INT 0x01
#define QDIO_STATUS_OUTBOUND_INT 0x02
#define QDIO_STATUS_LOOK_FOR_ERROR 0x04
#define QDIO_STATUS_MORE_THAN_ONE_QDIO_ERROR 0x08
#define QDIO_STATUS_MORE_THAN_ONE_SIGA_ERROR 0x10
#define QDIO_STATUS_ACTIVATE_CHECK_CONDITION 0x20
#define QDIO_SIGA_ERROR_ACCESS_EXCEPTION 0x10
#define QDIO_SIGA_ERROR_B_BIT_SET 0x20
/* for qdio_initialize */
#define QDIO_INBOUND_0COPY_SBALS 0x01
#define QDIO_OUTBOUND_0COPY_SBALS 0x02
#define QDIO_USE_OUTBOUND_PCIS 0x04
/* for qdio_cleanup */
#define QDIO_FLAG_CLEANUP_USING_CLEAR 0x01
#define QDIO_FLAG_CLEANUP_USING_HALT 0x02
/*
 * Parameter block handed to qdio_initialize() to establish the queues
 * of one QDIO subchannel.  Ownership of the referenced arrays stays
 * with the caller.
 */
typedef struct qdio_initialize_t {
int irq;				/* subchannel irq number */
unsigned char q_format;			/* QDIO_*_QFMT */
unsigned char adapter_name[8];
unsigned int qib_param_field_format; /*adapter dependent*/
/* pointer to 128 bytes or NULL, if no param field */
unsigned char *qib_param_field; /* adapter dependent */
/* pointer to no_queues*128 words of data or NULL */
unsigned long *input_slib_elements;
unsigned long *output_slib_elements;
unsigned int min_input_threshold;
unsigned int max_input_threshold;
unsigned int min_output_threshold;
unsigned int max_output_threshold;
unsigned int no_input_qs;		/* number of input queues */
unsigned int no_output_qs;		/* number of output queues */
qdio_handler_t *input_handler;		/* called for inbound events */
qdio_handler_t *output_handler;		/* called for outbound events */
unsigned long int_parm;			/* opaque, echoed to the handlers */
unsigned long flags;			/* QDIO_INBOUND_0COPY_SBALS etc. */
void **input_sbal_addr_array; /* addr of n*128 void ptrs */
void **output_sbal_addr_array; /* addr of n*128 void ptrs */
} qdio_initialize_t;
extern int qdio_initialize(qdio_initialize_t *init_data);
extern int qdio_activate(int irq,int flags);
#define QDIO_STATE_MUST_USE_OUTB_PCI 0x00000001
#define QDIO_STATE_INACTIVE 0x00000002 /* after qdio_cleanup */
#define QDIO_STATE_ESTABLISHED 0x00000004 /* after qdio_initialize */
#define QDIO_STATE_ACTIVE 0x00000008 /* after qdio_activate */
#define QDIO_STATE_STOPPED 0x00000010 /* after queues went down */
extern unsigned long qdio_get_status(int irq);
#define QDIO_FLAG_SYNC_INPUT 0x01
#define QDIO_FLAG_SYNC_OUTPUT 0x02
#define QDIO_FLAG_UNDER_INTERRUPT 0x04
#define QDIO_FLAG_NO_INPUT_INTERRUPT_CONTEXT 0x08 /* no effect on
adapter interrupts */
#define QDIO_FLAG_DONT_SIGA 0x10
extern int do_QDIO(int irq,unsigned int flags, unsigned int queue_number,
unsigned int qidx,unsigned int count,
qdio_buffer_t *buffers);
extern int qdio_synchronize(int irq,unsigned int flags,
unsigned int queue_number);
extern int qdio_cleanup(int irq,int how);
unsigned char qdio_get_slsb_state(int irq,unsigned int flag,
unsigned int queue_number,
unsigned int qidx);
extern void qdio_init_scrubber(void);
/*
* QDIO device commands returned by extended Sense-ID
*/
#define DEFAULT_ESTABLISH_QS_CMD 0x1b
#define DEFAULT_ESTABLISH_QS_COUNT 0x1000
#define DEFAULT_ACTIVATE_QS_CMD 0x1f
#define DEFAULT_ACTIVATE_QS_COUNT 0
/*
 * Channel command codes (and their count fields) for a QDIO device,
 * as returned by extended Sense-ID.
 */
typedef struct _qdio_cmds {
unsigned char rcd; /* read configuration data */
unsigned short count_rcd;
unsigned char sii; /* set interface identifier */
unsigned short count_sii;
unsigned char rni; /* read node identifier */
unsigned short count_rni;
unsigned char eq; /* establish QDIO queues */
unsigned short count_eq;
unsigned char aq; /* activate QDIO queues */
unsigned short count_aq;
} qdio_cmds_t;
/*
* additional CIWs returned by extended Sense-ID
*/
#define CIW_TYPE_EQUEUE 0x3 /* establish QDIO queues */
#define CIW_TYPE_AQUEUE 0x4 /* activate QDIO queues */
/*
 * Queue descriptor, format 0: one per queue inside the QDR.  On 31-bit
 * builds the res1/res2/res3 pad words keep each address in the 64-bit
 * slot the hardware expects.
 */
typedef struct _qdesfmt0 {
#ifdef QDIO_32_BIT
unsigned long res1; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long sliba; /* storage-list-information-block
address */
#ifdef QDIO_32_BIT
unsigned long res2; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long sla; /* storage-list address */
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long slsba; /* storage-list-state-block address */
unsigned int res4; /* reserved */
unsigned int akey : 4; /* access key for DLIB */
unsigned int bkey : 4; /* access key for SL */
unsigned int ckey : 4; /* access key for SBALs */
unsigned int dkey : 4; /* access key for SLSB */
unsigned int res5 : 16; /* reserved */
} __attribute__ ((packed)) qdesfmt0_t;
/*
 * Queue-Description record (QDR): page-aligned block passed to the
 * hardware when establishing queues; holds global queue parameters
 * plus up to 126 format-0 queue descriptors.
 */
typedef struct _qdr {
unsigned int qfmt : 8; /* queue format */
unsigned int pfmt : 8; /* impl. dep. parameter format */
unsigned int res1 : 8; /* reserved */
unsigned int ac : 8; /* adapter characteristics */
unsigned int res2 : 8; /* reserved */
unsigned int iqdcnt : 8; /* input-queue-descriptor count */
unsigned int res3 : 8; /* reserved */
unsigned int oqdcnt : 8; /* output-queue-descriptor count */
unsigned int res4 : 8; /* reserved */
unsigned int iqdsz : 8; /* input-queue-descriptor size */
unsigned int res5 : 8; /* reserved */
unsigned int oqdsz : 8; /* output-queue-descriptor size */
unsigned int res6[9]; /* reserved */
#ifdef QDIO_32_BIT
unsigned long res7; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long qiba; /* queue-information-block address */
unsigned int res8; /* reserved */
unsigned int qkey : 4; /* queue-information-block key */
unsigned int res9 : 28; /* reserved */
/* union _qd {*/ /* why this? */
qdesfmt0_t qdf0[126];
/* } qd;*/
} __attribute__ ((packed,aligned(4096))) qdr_t;
/*
 * queue information block (QIB): global per-subchannel information,
 * referenced from the QDR via qiba.
 */
#define QIB_AC_INBOUND_PCI_SUPPORTED 0x80
#define QIB_AC_OUTBOUND_PCI_SUPPORTED 0x40
typedef struct _qib {
unsigned int qfmt : 8; /* queue format */
unsigned int pfmt : 8; /* impl. dep. parameter format */
unsigned int res1 : 8; /* reserved */
unsigned int ac : 8; /* adapter characteristics (QIB_AC_*) */
unsigned int res2; /* reserved */
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long isliba; /* absolute address of 1st
input SLIB */
#ifdef QDIO_32_BIT
unsigned long res4; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long osliba; /* absolute address of 1st
output SLIB */
unsigned int res5; /* reserved */
unsigned int res6; /* reserved */
unsigned char ebcnam[8]; /* adapter identifier in EBCDIC */
unsigned char res7[88]; /* reserved */
unsigned char parm[QDIO_MAX_BUFFERS_PER_Q];
/* implementation dependent
parameters */
} __attribute__ ((packed,aligned(256))) qib_t;
/*
 * storage-list-information block element (SLIBE): one per buffer,
 * carries implementation-dependent parameters.
 */
typedef struct _slibe {
#ifdef QDIO_32_BIT
unsigned long res; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long parms; /* implementation dependent
parameters */
} slibe_t;
/*
 * storage-list-information block (SLIB): links the SL and SLSB of one
 * queue and holds one SLIBE per buffer; occupies a 2K-aligned area.
 */
typedef struct _slib {
#ifdef QDIO_32_BIT
unsigned long res1; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long nsliba; /* next SLIB address (if any) */
#ifdef QDIO_32_BIT
unsigned long res2; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long sla; /* SL address */
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long slsba; /* SLSB address */
unsigned char res4[1000]; /* reserved */
slibe_t slibe[QDIO_MAX_BUFFERS_PER_Q]; /* SLIB elements */
} __attribute__ ((packed,aligned(2048))) slib_t;
/*
 * Per-SBAL-element flag bits (bit view of the 'flags' byte) and the
 * corresponding word-wide mask values used on the whole element word.
 */
typedef struct _sbal_flags {
unsigned char res1 : 1; /* reserved */
unsigned char last : 1; /* last entry */
unsigned char cont : 1; /* contiguous storage */
unsigned char res2 : 1; /* reserved */
unsigned char frag : 2; /* fragmentation (s.below) */
unsigned char res3 : 2; /* reserved */
} __attribute__ ((packed)) sbal_flags_t;
/* word-wide masks matching the bit-field positions above */
#define SBAL_FLAGS_FIRST_FRAG 0x04000000UL
#define SBAL_FLAGS_MIDDLE_FRAG 0x08000000UL
#define SBAL_FLAGS_LAST_FRAG 0x0c000000UL
#define SBAL_FLAGS_LAST_ENTRY 0x40000000UL
#define SBAL_FLAGS_CONTIGUOUS 0x20000000UL
/*
 * sbalf.i0 flag values (byte 0 of the SBAL-element flags).
 *
 * Fix: SBAL_FLAGS0_FIRST_SBAL previously expanded to the bare
 * expression "MORE_SBALS | COMMAND" without parentheses; used inside
 * a larger expression (e.g. "x & SBAL_FLAGS0_FIRST_SBAL" or
 * "n * SBAL_FLAGS0_FIRST_SBAL") operator precedence silently changed
 * the meaning.  The expansion is now fully parenthesized.  The UL
 * suffix on SBAL_FLAGS0_PCI is added for consistency with its
 * siblings.
 */
#define SBAL_FLAGS0_DATA_CONTINUATION 0x20UL
/* Awesome OpenFCP extensions */
#define SBAL_FLAGS0_TYPE_STATUS 0x00UL
#define SBAL_FLAGS0_TYPE_WRITE 0x08UL
#define SBAL_FLAGS0_TYPE_READ 0x10UL
#define SBAL_FLAGS0_TYPE_WRITE_READ 0x18UL
#define SBAL_FLAGS0_MORE_SBALS 0x04UL
#define SBAL_FLAGS0_COMMAND 0x02UL
#define SBAL_FLAGS0_LAST_SBAL 0x00UL
#define SBAL_FLAGS0_ONLY_SBAL SBAL_FLAGS0_COMMAND
#define SBAL_FLAGS0_MIDDLE_SBAL SBAL_FLAGS0_MORE_SBALS
#define SBAL_FLAGS0_FIRST_SBAL (SBAL_FLAGS0_MORE_SBALS | SBAL_FLAGS0_COMMAND)
/* Naught of interest beyond this point */
#define SBAL_FLAGS0_PCI 0x40UL
/* sbalf byte, view for element 0: PCI/continuation/type bits */
typedef struct _sbal_sbalf_0 {
unsigned char res1 : 1; /* reserved */
unsigned char pci : 1; /* PCI indicator */
unsigned char cont : 1; /* data continuation */
unsigned char sbtype: 2; /* storage-block type (OpenFCP) */
unsigned char res2 : 3; /* reserved */
} __attribute__ ((packed)) sbal_sbalf_0_t;
/* sbalf byte, view for element 1: storage key */
typedef struct _sbal_sbalf_1 {
unsigned char res1 : 4; /* reserved */
unsigned char key : 4; /* storage key */
} __attribute__ ((packed)) sbal_sbalf_1_t;
/* sbalf byte, view for element 14: error index */
typedef struct _sbal_sbalf_14 {
unsigned char res1 : 4; /* reserved */
unsigned char erridx : 4; /* error index */
} __attribute__ ((packed)) sbal_sbalf_14_t;
/* sbalf byte, view for element 15: reason code */
typedef struct _sbal_sbalf_15 {
unsigned char reason; /* reason code */
} __attribute__ ((packed)) sbal_sbalf_15_t;
/*
 * The sbalf byte of an SBAL element; which view applies depends on
 * the element's position in the SBAL (0, 1, 14 or 15).
 */
typedef union _sbal_sbalf {
sbal_sbalf_0_t i0;
sbal_sbalf_1_t i1;
sbal_sbalf_14_t i14;
sbal_sbalf_15_t i15;
unsigned char value;
} sbal_sbalf_t;
/*
 * One storage-block access-list element: flags, sbalf byte, data
 * count and absolute data address.  31-bit builds pad so 'addr'
 * fills the 64-bit slot.
 */
typedef struct _sbale {
union {
sbal_flags_t bits; /* flags */
unsigned char value;
} flags;
unsigned int res1 : 16; /* reserved */
sbal_sbalf_t sbalf; /* SBAL flags */
unsigned int res2 : 16; /* reserved */
unsigned int count : 16; /* data count */
#ifdef QDIO_32_BIT
unsigned long res3; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long addr; /* absolute data address */
} __attribute__ ((packed,aligned(16))) sbal_element_t;
/*
 * storage-block access-list (SBAL): the per-buffer scatter/gather
 * list of elements.
 */
typedef struct _sbal {
sbal_element_t element[QDIO_MAX_ELEMENTS_PER_BUFFER];
} __attribute__ ((packed,aligned(256))) sbal_t;
/*
 * storage-list (SL): one absolute SBAL address per buffer.
 */
typedef struct _sl_element {
#ifdef QDIO_32_BIT
unsigned long res; /* reserved */
#endif /* QDIO_32_BIT */
unsigned long sbal; /* absolute SBAL address */
} __attribute__ ((packed)) sl_element_t;
typedef struct _sl {
sl_element_t element[QDIO_MAX_BUFFERS_PER_Q];
} __attribute__ ((packed,aligned(1024))) sl_t;
/*
 * storage-list-state block (SLSB): one state byte per buffer; the
 * union allows byte-wise or bit-field access (see SLSB_* values).
 */
/*typedef struct _slsb_val {*/
/* unsigned char value; */ /* SLSB entry as a single byte value */
/*} __attribute__ ((packed)) slsb_val_t;*/
typedef struct _slsb_flags {
unsigned char owner : 2; /* SBAL owner */
unsigned char type : 1; /* buffer type */
unsigned char state : 5; /* processing state */
} __attribute__ ((packed)) slsb_flags_t;
typedef struct _slsb {
union _acc {
unsigned char val[QDIO_MAX_BUFFERS_PER_Q];
slsb_flags_t flags[QDIO_MAX_BUFFERS_PER_Q];
} acc;
} __attribute__ ((packed,aligned(256))) slsb_t;
/*
* SLSB values
*/
#define SLSB_OWNER_PROG 1
#define SLSB_OWNER_CU 2
#define SLSB_TYPE_INPUT 0
#define SLSB_TYPE_OUTPUT 1
#define SLSB_STATE_NOT_INIT 0
#define SLSB_STATE_EMPTY 1
#define SLSB_STATE_PRIMED 2
#define SLSB_STATE_HALTED 0xe
#define SLSB_STATE_ERROR 0xf
#define SLSB_P_INPUT_NOT_INIT 0x80
#define SLSB_P_INPUT_PROCESSING 0x81
#define SLSB_CU_INPUT_EMPTY 0x41
#define SLSB_P_INPUT_PRIMED 0x82
#define SLSB_P_INPUT_HALTED 0x8E
#define SLSB_P_INPUT_ERROR 0x8F
#define SLSB_P_OUTPUT_NOT_INIT 0xA0
#define SLSB_P_OUTPUT_EMPTY 0xA1
#define SLSB_CU_OUTPUT_PRIMED 0x62
#define SLSB_P_OUTPUT_HALTED 0xAE
#define SLSB_P_OUTPUT_ERROR 0xAF
#define SLSB_ERROR_DURING_LOOKUP 0xFF
/*
 * path management control word (extended layout)
 */
typedef struct {
unsigned long intparm; /* interruption parameter */
unsigned int qf : 1; /* qdio facility */
unsigned int res0 : 1; /* reserved */
unsigned int isc : 3; /* interruption subclass */
unsigned int res5 : 3; /* reserved zeros */
unsigned int ena : 1; /* enabled */
unsigned int lm : 2; /* limit mode */
unsigned int mme : 2; /* measurement-mode enable */
unsigned int mp : 1; /* multipath mode */
unsigned int tf : 1; /* timing facility */
unsigned int dnv : 1; /* device number valid */
unsigned int dev : 16; /* device number */
unsigned char lpm; /* logical path mask */
unsigned char pnom; /* path not operational mask */
unsigned char lpum; /* last path used mask */
unsigned char pim; /* path installed mask */
unsigned short mbi; /* measurement-block index */
unsigned char pom; /* path operational mask */
unsigned char pam; /* path available mask */
unsigned char chpid[8]; /* CHPID 0-7 (if available) */
unsigned int res1 : 8; /* reserved */
unsigned int st : 3; /* presumably subchannel type -- TODO(review): confirm against PoP */
unsigned int res2 : 20; /* reserved */
unsigned int csense : 1; /* concurrent sense; can be enabled
per MSCH, however, if facility
is not installed, this results
in an operand exception. */
} pmcw_e_t;
/*
 * subchannel status word (extended layout)
 */
typedef struct {
unsigned int key : 4; /* subchannel key */
unsigned int sctl : 1; /* suspend control */
unsigned int eswf : 1; /* ESW format */
unsigned int cc : 2; /* deferred condition code */
unsigned int fmt : 1; /* format */
unsigned int pfch : 1; /* prefetch */
unsigned int isic : 1; /* initial-status interruption control */
unsigned int alcc : 1; /* address-limit checking control */
unsigned int ssi : 1; /* suppress-suspended interruption */
unsigned int zcc : 1; /* zero condition code */
unsigned int ectl : 1; /* extended control */
unsigned int pno : 1; /* path not operational */
unsigned int qact : 1; /* qdio active */
unsigned int fctl : 3; /* function control */
unsigned int actl : 7; /* activity control */
unsigned int stctl : 5; /* status control */
unsigned long cpa; /* channel program address */
unsigned int dstat : 8; /* device status */
unsigned int cstat : 8; /* subchannel status */
unsigned int count : 16; /* residual count */
} scsw_e_t;
/*
 * Per-queue state of the qdio layer (one per input or output queue
 * of a subchannel).  Aligned to 256 bytes; the SLSB must come first
 * so it keeps the hardware-required alignment.
 */
typedef struct qdio_q_t {
volatile slsb_t slsb;		/* buffer-state block shared with hw */
__u32 * volatile dev_st_chg_ind;	/* device state-change indicator */
int is_input_q;			/* non-zero for an inbound queue */
int is_0copy_sbals_q;		/* zero-copy SBALs in use */
int irq;			/* owning subchannel irq */
unsigned int is_iqdio_q;
/* bit 0 means queue 0, bit 1 means queue 1, ... */
unsigned int mask;
unsigned int q_no;		/* index of this queue on the irq */
qdio_handler_t (*handler);	/* upper-layer callback */
/* points to the next buffer to be checked for having
 * been processed by the card (outbound)
 * or to the next buffer the program should check for (inbound) */
volatile int first_to_check;
/* and the last time it was: */
volatile int last_move_ftc;
atomic_t number_of_buffers_used;
atomic_t polling;
/* NOTE(review): the siga_* fields appear to cache which SIGA
 * operations this queue requires (cf. CHSC_FLAG_SIGA_*) -- confirm */
unsigned int siga_in;
unsigned int siga_out;
unsigned int siga_sync;
unsigned int siga_sync_done_on_thinints;
unsigned int hydra_gives_outbound_pcis;
/* used to save beginning position when calling dd_handlers */
int first_element_to_kick;
atomic_t use_count;		/* users currently inside the queue */
atomic_t is_in_shutdown;
#ifdef QDIO_USE_TIMERS_FOR_POLLING
struct timer_list timer;
atomic_t timer_already_set;
spinlock_t timer_lock;
#else /* QDIO_USE_TIMERS_FOR_POLLING */
struct tasklet_struct tasklet;
#endif /* QDIO_USE_TIMERS_FOR_POLLING */
unsigned int state;		/* QDIO_IRQ_STATE_* */
/* used to store the error condition during a data transfer */
unsigned int qdio_error;
unsigned int siga_error;
unsigned int error_status_flags;
/* list of interesting queues */
volatile struct qdio_q_t *list_next;
volatile struct qdio_q_t *list_prev;
slib_t *slib; /* a page is allocated under this pointer,
sl points into this page, offset PAGE_SIZE/2
(after slib) */
sl_t *sl;
volatile sbal_t *sbal[QDIO_MAX_BUFFERS_PER_Q];
qdio_buffer_t *qdio_buffers[QDIO_MAX_BUFFERS_PER_Q];
unsigned long int_parm;		/* opaque, echoed to the handler */
/*struct {
int in_bh_check_limit;
int threshold;
} threshold_classes[QDIO_STATS_CLASSES];*/
struct {
/* inbound: the time to stop polling
outbound: the time to kick peer */
int threshold; /* the real value */
/* outbound: last time of do_QDIO
inbound: last time of noticing incoming data */
/*__u64 last_transfer_times[QDIO_STATS_NUMBER];
int last_transfer_index; */
__u64 last_transfer_time;
} timing;
unsigned int queue_type;	/* QDIO_*_QFMT */
} __attribute__ ((aligned(256))) qdio_q_t;
/*
 * Per-subchannel state of the qdio layer: queue arrays, the CCW used
 * for establish/activate, and bookkeeping for the interrupt handler.
 */
typedef struct qdio_irq_t {
__u32 * volatile dev_st_chg_ind;	/* device state-change indicator */
unsigned long int_parm;			/* opaque, echoed to the handlers */
int irq;				/* subchannel irq number */
unsigned int is_iqdio_irq;
unsigned int hydra_gives_outbound_pcis;
unsigned int sync_done_on_outb_pcis;
unsigned int state;			/* QDIO_IRQ_STATE_* */
spinlock_t setting_up_lock;		/* serializes setup/teardown */
unsigned int no_input_qs;
unsigned int no_output_qs;
unsigned char qdioac;			/* adapter characteristics from CHSC */
qdio_q_t *input_qs[QDIO_MAX_QUEUES_PER_IRQ];
qdio_q_t *output_qs[QDIO_MAX_QUEUES_PER_IRQ];
ccw1_t ccw;				/* channel command in flight */
int io_result_cstat;
int io_result_dstat;
int io_result_flags;
atomic_t interrupt_has_arrived;
atomic_t interrupt_has_been_cleaned;
wait_queue_head_t wait_q;		/* waiters for interrupt completion */
qdr_t *qdr;				/* queue-description record */
qdio_cmds_t commands;			/* per-device command codes */
qib_t qib;
io_handler_func_t original_int_handler;	/* handler to restore on cleanup */
struct qdio_irq_t *next;		/* hash-bucket chain */
} qdio_irq_t;
#define QDIO_CHSC_RESPONSE_CODE_OK 1
/* flags for st qdio sch data */
#define CHSC_FLAG_QDIO_CAPABILITY 0x80
#define CHSC_FLAG_VALIDITY 0x40
#define CHSC_FLAG_SIGA_INPUT_NECESSARY 0x40
#define CHSC_FLAG_SIGA_OUTPUT_NECESSARY 0x20
#define CHSC_FLAG_SIGA_SYNC_NECESSARY 0x10
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_THININTS 0x08
#define CHSC_FLAG_SIGA_SYNC_DONE_ON_OUTB_PCIS 0x04
/*
 * Page-sized CHSC command area: request block (with per-operation
 * data layouts) followed by the response block.  Field offsets are
 * architected; do not reorder.
 */
typedef struct qdio_chsc_area_t {
struct {
/* word 0 */
__u16 command_code1;
__u16 command_code2;
/* word 1 */
__u16 operation_code;
__u16 first_sch;
/* word 2 */
__u8 reserved1;
__u8 image_id;
__u16 last_sch;
/* word 3 */
__u32 reserved2;
/* word 4 */
union {
/* layout for setting the thin-interrupt indicators */
struct {
/* word 4&5 */
__u64 summary_indicator_addr;
/* word 6&7 */
__u64 subchannel_indicator_addr;
/* word 8 */
int ks:4;
int kc:4;
int reserved1:21;
int isc:3;
/* word 9&10 */
__u32 reserved2[2];
/* word 11 */
__u32 subsystem_id;
/* word 12-1015 */
__u32 reserved3[1004];
} __attribute__ ((packed,aligned(4))) set_chsc;
/* layout for setting the delay target */
struct {
/* word 4&5 */
__u32 reserved1[2];
/* word 6 */
__u32 delay_target;
/* word 7-1015 */
__u32 reserved4[1009];
} __attribute__ ((packed,aligned(4))) set_chsc_fast;
/* layout of the store-subchannel-QDIO-data response */
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
/* words 2 to 9 for st sch qdio data */
__u8 flags;
__u8 reserved2;
__u16 sch;
__u8 qfmt;
__u8 reserved3;
__u8 qdioac;
__u8 sch_class;
__u8 reserved4;
__u8 icnt;
__u8 reserved5;
__u8 ocnt;
/* plus 5 words of reserved fields */
} __attribute__ ((packed,aligned(8)))
store_qdio_data_response;
} operation_data_area;
} __attribute__ ((packed,aligned(8))) request_block;
struct {
/* word 0 */
__u16 length;
__u16 response_code;
/* word 1 */
__u32 reserved1;
} __attribute__ ((packed,aligned(8))) response_block;
} __attribute__ ((packed,aligned(PAGE_SIZE))) qdio_chsc_area_t;
#define QDIO_PRINTK_HEADER QDIO_NAME ": "
#if QDIO_VERBOSE_LEVEL>8
#define QDIO_PRINT_STUPID(x...) printk( KERN_DEBUG QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_STUPID(x...)
#endif
#if QDIO_VERBOSE_LEVEL>7
#define QDIO_PRINT_ALL(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALL(x...)
#endif
#if QDIO_VERBOSE_LEVEL>6
#define QDIO_PRINT_INFO(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_INFO(x...)
#endif
#if QDIO_VERBOSE_LEVEL>5
#define QDIO_PRINT_WARN(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_WARN(x...)
#endif
#if QDIO_VERBOSE_LEVEL>4
#define QDIO_PRINT_ERR(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ERR(x...)
#endif
#if QDIO_VERBOSE_LEVEL>3
#define QDIO_PRINT_CRIT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_CRIT(x...)
#endif
#if QDIO_VERBOSE_LEVEL>2
#define QDIO_PRINT_ALERT(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_ALERT(x...)
#endif
#if QDIO_VERBOSE_LEVEL>1
#define QDIO_PRINT_EMERG(x...) printk( QDIO_PRINTK_HEADER x)
#else
#define QDIO_PRINT_EMERG(x...)
#endif
#endif /* __QDIO_H__ */
#ifndef _S390X_RWSEM_H
#define _S390X_RWSEM_H
/*
* include/asm-s390x/rwsem.h
*
* S390 version
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*
* Based on asm-alpha/semaphore.h and asm-i386/rwsem.h
*/
/*
*
* The MSW of the count is the negated number of active writers and waiting
* lockers, and the LSW is the total number of active locks
*
* The lock count is initialized to 0 (no active and no waiting lockers).
*
* When a writer subtracts WRITE_BIAS, it'll get 0xffff0001 for the case of an
* uncontended lock. This can be determined because XADD returns the old value.
* Readers increment by 1 and see a positive value when uncontended, negative
* if there are writers (and maybe) readers waiting (in which case it goes to
* sleep).
*
* The value of WAITING_BIAS supports up to 32766 waiting processes. This can
* be extended to 65534 by manually checking the whole MSW rather than relying
* on the S flag.
*
* The value of ACTIVE_BIAS supports up to 65535 active processes.
*
* This should be totally fair - if anything is waiting, a process that wants a
* lock will go to the back of the queue. When the currently active lock is
* released, if there's a writer at the front of the queue, then that and only
that will be woken up; if there's a bunch of consecutive readers at the
* front, then they'll all be woken up, but no other readers will be.
*/
#ifndef _LINUX_RWSEM_H
#error please dont include asm/rwsem.h directly, use linux/rwsem.h instead
#endif
#ifdef __KERNEL__
#include <linux/list.h>
#include <linux/spinlock.h>
struct rwsem_waiter;
extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
extern struct rw_semaphore *rwsem_wake(struct rw_semaphore *);
/*
 * the semaphore definition
 */
struct rw_semaphore {
signed long count;		/* MSW: -(writers+waiters), LSW: active locks */
spinlock_t wait_lock;		/* protects wait_list */
struct list_head wait_list;	/* queued rwsem_waiter entries */
};
#define RWSEM_UNLOCKED_VALUE 0x0000000000000000L
#define RWSEM_ACTIVE_BIAS 0x0000000000000001L
#define RWSEM_ACTIVE_MASK 0x00000000ffffffffL
#define RWSEM_WAITING_BIAS (-0x0000000100000000L)
#define RWSEM_ACTIVE_READ_BIAS RWSEM_ACTIVE_BIAS
#define RWSEM_ACTIVE_WRITE_BIAS (RWSEM_WAITING_BIAS + RWSEM_ACTIVE_BIAS)
/*
* initialisation
*/
#define __RWSEM_INITIALIZER(name) \
{ RWSEM_UNLOCKED_VALUE, SPIN_LOCK_UNLOCKED, LIST_HEAD_INIT((name).wait_list) }
#define DECLARE_RWSEM(name) \
struct rw_semaphore name = __RWSEM_INITIALIZER(name)
/*
 * Run-time initialisation of an rw_semaphore: unlocked count, fresh
 * wait-list lock, empty waiter list.  Equivalent to
 * __RWSEM_INITIALIZER() for statically declared semaphores.
 */
static inline void init_rwsem(struct rw_semaphore *sem)
{
	spin_lock_init(&sem->wait_lock);
	INIT_LIST_HEAD(&sem->wait_list);
	sem->count = RWSEM_UNLOCKED_VALUE;
}
/*
 * lock for reading.
 * Atomically adds RWSEM_ACTIVE_READ_BIAS to sem->count with a
 * load / add / compare-and-swap (csg) retry loop; 'old' ends up
 * holding the count value seen before our increment.  A negative
 * old count means a writer is active or waiting (see the bias
 * layout in the file header), so block in rwsem_down_read_failed().
 */
static inline void __down_read(struct rw_semaphore *sem)
{
signed long old, new;
__asm__ __volatile__(
" lg %0,0(%2)\n"
"0: lgr %1,%0\n"
" aghi %1,%3\n"
" csg %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "i" (RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory" );
if (old < 0)
rwsem_down_read_failed(sem);
}
/*
 * lock for writing.
 * Atomically adds RWSEM_ACTIVE_WRITE_BIAS to sem->count via a csg
 * retry loop ('tmp' holds the bias because 'ag' needs a memory
 * operand).  If the count was anything but 0 beforehand, the
 * semaphore was held or contended, so block in
 * rwsem_down_write_failed().
 */
static inline void __down_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;
tmp = RWSEM_ACTIVE_WRITE_BIAS;
__asm__ __volatile__(
" lg %0,0(%2)\n"
"0: lgr %1,%0\n"
" ag %1,%3\n"
" csg %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "m" (tmp)
: "cc", "memory" );
if (old != 0)
rwsem_down_write_failed(sem);
}
/*
 * unlock after reading.
 * Atomically subtracts RWSEM_ACTIVE_READ_BIAS from sem->count via a
 * csg retry loop.  If the new count is negative (writers waiting)
 * and no active lockers remain in the LSW, wake the next waiter.
 */
static inline void __up_read(struct rw_semaphore *sem)
{
signed long old, new;
__asm__ __volatile__(
" lg %0,0(%2)\n"
"0: lgr %1,%0\n"
" aghi %1,%3\n"
" csg %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "i" (-RWSEM_ACTIVE_READ_BIAS)
: "cc", "memory" );
if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
 * unlock after writing.
 * Atomically subtracts RWSEM_ACTIVE_WRITE_BIAS from sem->count via a
 * csg retry loop ('tmp' holds the negated bias for the 'ag' memory
 * operand).  If waiters remain (new count negative) and no active
 * lockers are left, wake the next waiter.
 */
static inline void __up_write(struct rw_semaphore *sem)
{
signed long old, new, tmp;
tmp = -RWSEM_ACTIVE_WRITE_BIAS;
__asm__ __volatile__(
" lg %0,0(%2)\n"
"0: lgr %1,%0\n"
" ag %1,%3\n"
" csg %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "m" (tmp)
: "cc", "memory" );
if (new < 0)
if ((new & RWSEM_ACTIVE_MASK) == 0)
rwsem_wake(sem);
}
/*
 * implement atomic add functionality:
 * sem->count += delta, performed atomically with a csg retry loop;
 * the result is discarded.
 */
static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
{
signed long old, new;
__asm__ __volatile__(
" lg %0,0(%2)\n"
"0: lgr %1,%0\n"
" agr %1,%3\n"
" csg %0,%1,0(%2)\n"
" jl 0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "d" (delta)
: "cc", "memory" );
}
/*
* implement exchange and add functionality
*/
/*
 * Implement exchange-and-add functionality: atomically add 'delta' to
 * sem->count and return the NEW (post-add) value -- note this differs
 * from x86 xadd, which yields the old value.
 */
static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
{
signed long old, new;
__asm__ __volatile__(
"	lg	%0,0(%2)\n"
"0:	lgr	%1,%0\n"
"	agr	%1,%3\n"
"	csg	%0,%1,0(%2)\n"
"	jl	0b"
: "=&d" (old), "=&d" (new)
: "a" (&sem->count), "d" (delta)
: "cc", "memory" );
return new;
}
#endif /* __KERNEL__ */
#endif /* _S390X_RWSEM_H */
/*
* include/asm-s390/s390-gdbregs.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com)
*
* used both by the linux kernel for remote debugging & gdb
*/
#ifndef _S390_GDBREGS_H
#define _S390_GDBREGS_H
#ifdef __KERNEL__
#include <asm/s390-regs-common.h>
#else
#include <s390x/s390-regs-common.h>
#endif
#define S390_MAX_INSTR_SIZE 6
#define NUM_REGS (2+NUM_GPRS+NUM_ACRS+NUM_CRS+1+NUM_FPRS)
#define FIRST_ACR (2+NUM_GPRS)
#define LAST_ACR (FIRST_ACR+NUM_ACRS-1)
#define FIRST_CR (FIRST_ACR+NUM_ACRS)
#define LAST_CR (FIRST_CR+NUM_CRS-1)
#define PSWM_REGNUM 0
#define PC_REGNUM 1
#define GP0_REGNUM 2 /* GPR register 0 */
#define GP_LAST_REGNUM (GP0_REGNUM+NUM_GPRS-1)
#define RETADDR_REGNUM (GP0_REGNUM+14) /* Usually return address */
#define SP_REGNUM (GP0_REGNUM+15) /* Contains address of top of stack */
#define FP_REGNUM SP_REGNUM /* needed in findvar.c still */
#define FRAME_REGNUM (GP0_REGNUM+11)
#define FPC_REGNUM (GP0_REGNUM+NUM_GPRS+NUM_ACRS+NUM_CRS)
#define FP0_REGNUM (FPC_REGNUM+1) /* FPR (Floating point) register 0 */
#define FPLAST_REGNUM (FP0_REGNUM+NUM_FPRS-1) /* Last floating point register */
/* The top of this structure is as similar as possible to a pt_regs structure to */
/* simplify code */
/*
 * Register snapshot exchanged with gdb: the common pt_regs-like head
 * (S390_REGS_COMMON), then control registers, then the FP register
 * set.  Packed -- presumably so the byte offsets line up with the
 * ACR0_OFFSET/CR0_OFFSET/... layout macros below; confirm when
 * touching either side.
 */
typedef struct
{
S390_REGS_COMMON
__u32 crs[NUM_CRS];
s390_fp_regs fp_regs;
} s390_gdb_regs __attribute__((packed));
#define REGISTER_NAMES \
{ \
"pswm","pswa", \
"gpr0","gpr1","gpr2","gpr3","gpr4","gpr5","gpr6","gpr7", \
"gpr8","gpr9","gpr10","gpr11","gpr12","gpr13","gpr14","gpr15", \
"acr0","acr1","acr2","acr3","acr4","acr5","acr6","acr7", \
"acr8","acr9","acr10","acr11","acr12","acr13","acr14","acr15", \
"cr0","cr1","cr2","cr3","cr4","cr5","cr6","cr7", \
"cr8","cr9","cr10","cr11","cr12","cr13","cr14","cr15", \
"fpc", \
"fpr0","fpr1","fpr2","fpr3","fpr4","fpr5","fpr6","fpr7", \
"fpr8","fpr9","fpr10","fpr11","fpr12","fpr13","fpr14","fpr15" \
}
/* Index within `registers' of the first byte of the space for
register N. */
/* Byte offsets of each register group inside the gdb register buffer:
 * PSW (mask+addr), then GPRs, ACRs, CRs, the fpc word (+pad), FPRs. */
#define ACR0_OFFSET ((PSW_MASK_SIZE+PSW_ADDR_SIZE)+(GPR_SIZE*NUM_GPRS))
/* BUGFIX: the ACR area spans ACR_SIZE*NUM_ACRS bytes; the original
 * read ACR_SIZE+NUM_ACRS ('+' for '*'), which misplaced CR0 and every
 * later offset in the buffer. */
#define CR0_OFFSET (ACR0_OFFSET+(ACR_SIZE*NUM_ACRS))
#define FPC_OFFSET (CR0_OFFSET+(CR_SIZE*NUM_CRS))
/* '+' is intentional here: one fpc value followed by its pad bytes */
#define FP0_OFFSET (FPC_OFFSET+(FPC_SIZE+FPC_PAD_SIZE))
#define REGISTER_BYTES \
((FP0_OFFSET)+(FPR_SIZE*NUM_FPRS))
#define REGISTER_BYTE(N) ((N)<=GP_LAST_REGNUM ? (N)*8: \
(N) <= LAST_ACR ? (ACR0_OFFSET+(((N)-FIRST_ACR)*ACR_SIZE)): \
(N) <= LAST_CR ? (CR0_OFFSET+(((N)-FIRST_CR)*CR_SIZE)): \
(N) == FPC_REGNUM ? FPC_OFFSET:(FP0_OFFSET+(((N)-FP0_REGNUM)*FPR_SIZE)))
#endif
......@@ -25,6 +25,10 @@ typedef struct ext_int_info_t {
extern ext_int_info_t *ext_int_hash[];
int register_external_interrupt(__u16 code, ext_int_handler_t handler);
int register_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *info);
int unregister_external_interrupt(__u16 code, ext_int_handler_t handler);
int unregister_early_external_interrupt(__u16 code, ext_int_handler_t handler,
ext_int_info_t *info);
#endif
......@@ -18,10 +18,13 @@
typedef struct _ioinfo {
unsigned int irq; /* aka. subchannel number */
spinlock_t irq_lock; /* irq lock */
void *private_data; /* pointer to private data */
struct _ioinfo *prev;
struct _ioinfo *next;
__u8 st; /* subchannel type */
union {
unsigned int info;
struct {
......@@ -50,10 +53,9 @@ typedef struct _ioinfo {
unsigned int esid : 1; /* Ext. SenseID supported by HW */
unsigned int rcd : 1; /* RCD supported by HW */
unsigned int repnone : 1; /* don't call IRQ handler on interrupt */
unsigned int newreq : 1; /* new register interface */
unsigned int dval : 1; /* device number valid */
unsigned int unknown : 1; /* unknown device - if SenseID failed */
unsigned int unused : (sizeof(unsigned int)*8 - 24); /* unused */
unsigned int unused : (sizeof(unsigned int)*8 - 23); /* unused */
} __attribute__ ((packed)) flags;
} ui;
......@@ -75,6 +77,7 @@ typedef struct _ioinfo {
unsigned long qintparm; /* queued interruption parameter */
unsigned long qflag; /* queued flags */
__u8 qlpm; /* queued logical path mask */
ssd_info_t ssd_info; /* subchannel description */
} __attribute__ ((aligned(8))) ioinfo_t;
......@@ -89,6 +92,12 @@ typedef struct _ioinfo {
#define IOINFO_FLAGS_REPALL 0x00800000
extern ioinfo_t *ioinfo[];
int s390_set_private_data(int irq, void * data);
void * s390_get_private_data(int irq);
#define CHSC_SEI_ACC_CHPID 1
#define CHSC_SEI_ACC_LINKADDR 2
#define CHSC_SEI_ACC_FULLLINKADDR 3
#endif /* __s390io_h */
......@@ -13,10 +13,23 @@
#include <asm/types.h>
typedef struct _mci {
__u32 to_be_defined_1 : 9;
__u32 cp : 1; /* channel-report pending */
__u32 to_be_defined_2 : 22;
__u32 to_be_defined_3;
__u32 sd : 1; /* 00 system damage */
__u32 pd : 1; /* 01 instruction-processing damage */
__u32 sr : 1; /* 02 system recovery */
__u32 to_be_defined_1 : 4; /* 03-06 */
__u32 dg : 1; /* 07 degradation */
__u32 w : 1; /* 08 warning pending */
__u32 cp : 1; /* 09 channel-report pending */
__u32 to_be_defined_2 : 6; /* 10-15 */
__u32 se : 1; /* 16 storage error uncorrected */
__u32 sc : 1; /* 17 storage error corrected */
__u32 ke : 1; /* 18 storage-key error uncorrected */
__u32 ds : 1; /* 19 storage degradation */
__u32 to_be_defined_3 : 4; /* 20-23 */
__u32 fa : 1; /* 24 failing storage address validity */
__u32 to_be_defined_4 : 7; /* 25-31 */
__u32 ie : 1; /* 32 indirect storage error */
__u32 to_be_defined_5 : 31; /* 33-63 */
} mci_t;
//
......
/*
* include/asm-s390/semaphore.h
* include/asm-s390x/semaphore.h
*
* S390 version
* Copyright (C) 1999 IBM Deutschland Entwicklung GmbH, IBM Corporation
......@@ -18,15 +18,11 @@
struct semaphore {
atomic_t count;
int sleepers;
wait_queue_head_t wait;
};
#define __SEM_DEBUG_INIT(name)
#define __SEMAPHORE_INITIALIZER(name,count) \
{ ATOMIC_INIT(count), 0, __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) \
__SEM_DEBUG_INIT(name) }
{ ATOMIC_INIT(count), __WAIT_QUEUE_HEAD_INITIALIZER((name).wait) }
#define __MUTEX_INITIALIZER(name) \
__SEMAPHORE_INITIALIZER(name,1)
......@@ -39,7 +35,7 @@ struct semaphore {
static inline void sema_init (struct semaphore *sem, int val)
{
*sem = (struct semaphore)__SEMAPHORE_INITIALIZER((*sem),val);
*sem = (struct semaphore) __SEMAPHORE_INITIALIZER((*sem),val);
}
static inline void init_MUTEX (struct semaphore *sem)
......@@ -52,11 +48,6 @@ static inline void init_MUTEX_LOCKED (struct semaphore *sem)
sema_init(sem, 0);
}
asmlinkage void __down_failed(void /* special register calling convention */);
asmlinkage int __down_failed_interruptible(void /* params in registers */);
asmlinkage int __down_failed_trylock(void /* params in registers */);
asmlinkage void __up_wakeup(void /* special register calling convention */);
asmlinkage void __down(struct semaphore * sem);
asmlinkage int __down_interruptible(struct semaphore * sem);
asmlinkage int __down_trylock(struct semaphore * sem);
......@@ -79,11 +70,28 @@ static inline int down_interruptible(struct semaphore * sem)
static inline int down_trylock(struct semaphore * sem)
{
int ret = 0;
if (atomic_dec_return(&sem->count) < 0)
ret = __down_trylock(sem);
return ret;
int old_val, new_val;
/*
* This inline assembly atomically implements the equivalent
* to the following C code:
* old_val = sem->count.counter;
* if ((new_val = old_val) > 0)
* sem->count.counter = --new_val;
* In the ppc code this is called atomic_dec_if_positive.
*/
__asm__ __volatile__ (
" l %0,0(%3)\n"
"0: ltr %1,%0\n"
" jle 1f\n"
" ahi %1,-1\n"
" cs %0,%1,0(%3)\n"
" jl 0b\n"
"1:"
: "=&d" (old_val), "=&d" (new_val),
"+m" (sem->count.counter)
: "a" (&sem->count.counter) : "cc" );
return old_val <= 0;
}
static inline void up(struct semaphore * sem)
......
......@@ -13,7 +13,7 @@
#define RAMDISK_ORIGIN 0x800000
#define RAMDISK_SIZE 0x800000
#ifndef __ASSEMBLER__
#ifndef __ASSEMBLY__
#define IPL_DEVICE (*(unsigned long *) (0x10400))
#define INITRD_START (*(unsigned long *) (0x10408))
......
......@@ -13,81 +13,74 @@
#include <asm-generic/siginfo.h>
/*
* si_code values
* Digital reserves positive values for kernel-generated signals.
*/
#undef SI_TIMER
#define SI_TIMER -2 /* sent by timer expiration */
/*
* SIGILL si_codes
*/
#define ILL_ILLOPC 1 /* illegal opcode */
#define ILL_ILLOPN 2 /* illegal operand */
#define ILL_ILLADR 3 /* illegal addressing mode */
#define ILL_ILLTRP 4 /* illegal trap */
#define ILL_PRVOPC 5 /* privileged opcode */
#define ILL_PRVREG 6 /* privileged register */
#define ILL_COPROC 7 /* coprocessor error */
#define ILL_BADSTK 8 /* internal stack error */
#define ILL_ILLOPC (__SI_FAULT|1) /* illegal opcode */
#define ILL_ILLOPN (__SI_FAULT|2) /* illegal operand */
#define ILL_ILLADR (__SI_FAULT|3) /* illegal addressing mode */
#define ILL_ILLTRP (__SI_FAULT|4) /* illegal trap */
#define ILL_PRVOPC (__SI_FAULT|5) /* privileged opcode */
#define ILL_PRVREG (__SI_FAULT|6) /* privileged register */
#define ILL_COPROC (__SI_FAULT|7) /* coprocessor error */
#define ILL_BADSTK (__SI_FAULT|8) /* internal stack error */
#define NSIGILL 8
/*
* SIGFPE si_codes
*/
#define FPE_INTDIV 1 /* integer divide by zero */
#define FPE_INTOVF 2 /* integer overflow */
#define FPE_FLTDIV 3 /* floating point divide by zero */
#define FPE_FLTOVF 4 /* floating point overflow */
#define FPE_FLTUND 5 /* floating point underflow */
#define FPE_FLTRES 6 /* floating point inexact result */
#define FPE_FLTINV 7 /* floating point invalid operation */
#define FPE_FLTSUB 8 /* subscript out of range */
#define FPE_INTDIV (__SI_FAULT|1) /* integer divide by zero */
#define FPE_INTOVF (__SI_FAULT|2) /* integer overflow */
#define FPE_FLTDIV (__SI_FAULT|3) /* floating point divide by zero */
#define FPE_FLTOVF (__SI_FAULT|4) /* floating point overflow */
#define FPE_FLTUND (__SI_FAULT|5) /* floating point underflow */
#define FPE_FLTRES (__SI_FAULT|6) /* floating point inexact result */
#define FPE_FLTINV (__SI_FAULT|7) /* floating point invalid operation */
#define FPE_FLTSUB (__SI_FAULT|8) /* subscript out of range */
#define NSIGFPE 8
/*
* SIGSEGV si_codes
*/
#define SEGV_MAPERR 1 /* address not mapped to object */
#define SEGV_ACCERR 2 /* invalid permissions for mapped object */
#define SEGV_MAPERR (__SI_FAULT|1) /* address not mapped to object */
#define SEGV_ACCERR (__SI_FAULT|2) /* invalid permissions for mapped object */
#define NSIGSEGV 2
/*
* SIGBUS si_codes
*/
#define BUS_ADRALN 1 /* invalid address alignment */
#define BUS_ADRERR 2 /* non-existant physical address */
#define BUS_OBJERR 3 /* object specific hardware error */
#define BUS_ADRALN (__SI_FAULT|1) /* invalid address alignment */
#define BUS_ADRERR (__SI_FAULT|2) /* non-existant physical address */
#define BUS_OBJERR (__SI_FAULT|3) /* object specific hardware error */
#define NSIGBUS 3
/*
* SIGTRAP si_codes
*/
#define TRAP_BRKPT 1 /* process breakpoint */
#define TRAP_TRACE 2 /* process trace trap */
#define TRAP_BRKPT (__SI_FAULT|1) /* process breakpoint */
#define TRAP_TRACE (__SI_FAULT|2) /* process trace trap */
#define NSIGTRAP 2
/*
* SIGCHLD si_codes
*/
#define CLD_EXITED 1 /* child has exited */
#define CLD_KILLED 2 /* child was killed */
#define CLD_DUMPED 3 /* child terminated abnormally */
#define CLD_TRAPPED 4 /* traced child has trapped */
#define CLD_STOPPED 5 /* child has stopped */
#define CLD_CONTINUED 6 /* stopped child has continued */
#define CLD_EXITED (__SI_CHLD|1) /* child has exited */
#define CLD_KILLED (__SI_CHLD|2) /* child was killed */
#define CLD_DUMPED (__SI_CHLD|3) /* child terminated abnormally */
#define CLD_TRAPPED (__SI_CHLD|4) /* traced child has trapped */
#define CLD_STOPPED (__SI_CHLD|5) /* child has stopped */
#define CLD_CONTINUED (__SI_CHLD|6) /* stopped child has continued */
#define NSIGCHLD 6
/*
* SIGPOLL si_codes
*/
#define POLL_IN 1 /* data input available */
#define POLL_OUT 2 /* output buffers available */
#define POLL_MSG 3 /* input message available */
#define POLL_ERR 4 /* i/o error */
#define POLL_PRI 5 /* high priority input available */
#define POLL_HUP 6 /* device disconnected */
#define POLL_IN (__SI_POLL|1) /* data input available */
#define POLL_OUT (__SI_POLL|2) /* output buffers available */
#define POLL_MSG (__SI_POLL|3) /* input message available */
#define POLL_ERR (__SI_POLL|4) /* i/o error */
#define POLL_PRI (__SI_POLL|5) /* high priority input available */
#define POLL_HUP (__SI_POLL|6) /* device disconnected */
#define NSIGPOLL 6
#endif
......@@ -13,6 +13,7 @@
/* Avoid too many header ordering problems. */
struct siginfo;
struct pt_regs;
#ifdef __KERNEL__
/* Most things should be clean enough to redefine this at will, if care
......
......@@ -59,9 +59,6 @@ typedef enum
typedef enum
{
ec_schedule=0,
ec_restart,
ec_halt,
ec_power_off,
ec_call_function,
ec_bit_last
} ec_bit_sig;
......@@ -130,6 +127,6 @@ signal_processor_ps(__u32 *statusptr, __u64 parameter,
return ccode;
}
#endif __SIGP__
#endif /* __SIGP__ */
......@@ -10,6 +10,8 @@
#define __ASM_SMP_H
#include <linux/config.h>
#include <linux/threads.h>
#include <linux/ptrace.h>
#if defined(__KERNEL__) && defined(CONFIG_SMP) && !defined(__ASSEMBLY__)
......@@ -26,7 +28,7 @@ typedef struct
__u16 cpu;
} sigp_info;
extern unsigned long cpu_online_map;
extern volatile unsigned long cpu_online_map;
#define NO_PROC_ID 0xFF /* No processor magic marker */
......@@ -42,7 +44,7 @@ extern unsigned long cpu_online_map;
#define PROC_CHANGE_PENALTY 20 /* Schedule penalty */
#define smp_processor_id() (current->processor)
#define smp_processor_id() (current_thread_info()->cpu)
extern __inline__ int cpu_logical_map(int cpu)
{
......@@ -64,7 +66,5 @@ extern __inline__ __u16 hard_smp_processor_id(void)
#define cpu_logical_map(cpu) (cpu)
void smp_local_timer_interrupt(struct pt_regs * regs);
#endif
#endif
......@@ -27,32 +27,34 @@ typedef struct {
#define spin_unlock_wait(lp) do { barrier(); } while(((volatile spinlock_t *)(lp))->lock)
#define spin_is_locked(x) ((x)->lock != 0)
extern inline void spin_lock(spinlock_t *lp)
extern inline void _raw_spin_lock(spinlock_t *lp)
{
__asm__ __volatile(" bras 1,1f\n"
unsigned long reg1, reg2;
__asm__ __volatile(" bras %1,1f\n"
"0: # diag 0,0,68\n"
"1: slr 0,0\n"
" cs 0,1,0(%0)\n"
"1: slr %0,%0\n"
" cs %0,%1,0(%3)\n"
" jl 0b\n"
: : "a" (&lp->lock) : "0", "1", "cc", "memory" );
: "=&d" (reg1), "=&d" (reg2), "+m" (lp->lock)
: "a" (&lp->lock) : "cc" );
}
extern inline int spin_trylock(spinlock_t *lp)
extern inline int _raw_spin_trylock(spinlock_t *lp)
{
unsigned int result;
unsigned int result, reg;
__asm__ __volatile(" slr %0,%0\n"
" basr 1,0\n"
"0: cs %0,1,0(%1)"
: "=&d" (result)
: "a" (&lp->lock) : "1", "cc", "memory" );
" basr %1,0\n"
"0: cs %0,%1,0(%3)"
: "=&d" (result), "=&d" (reg), "+m" (lp->lock)
: "a" (&lp->lock) : "cc" );
return !result;
}
extern inline void spin_unlock(spinlock_t *lp)
extern inline void _raw_spin_unlock(spinlock_t *lp)
{
__asm__ __volatile(" xc 0(4,%0),0(%0)\n"
__asm__ __volatile(" xc 0(4,%1),0(%1)\n"
" bcr 15,0"
: : "a" (&lp->lock) : "memory", "cc" );
: "+m" (lp->lock) : "a" (&lp->lock) : "cc" );
}
/*
......@@ -74,43 +76,47 @@ typedef struct {
#define rwlock_init(x) do { *(x) = RW_LOCK_UNLOCKED; } while(0)
#define read_lock(rw) \
asm volatile(" lg 2,0(%0)\n" \
#define _raw_read_lock(rw) \
asm volatile(" lg 2,0(%1)\n" \
" j 1f\n" \
"0: # diag 0,0,68\n" \
"1: nihh 2,0x7fff\n" /* clear high (=write) bit */ \
" la 3,1(2)\n" /* one more reader */ \
" csg 2,3,0(%0)\n" /* try to write new value */ \
" csg 2,3,0(%1)\n" /* try to write new value */ \
" jl 0b" \
: : "a" (&(rw)->lock) : "2", "3", "cc", "memory" );
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \
: "2", "3", "cc" )
#define read_unlock(rw) \
asm volatile(" lg 2,0(%0)\n" \
#define _raw_read_unlock(rw) \
asm volatile(" lg 2,0(%1)\n" \
" j 1f\n" \
"0: # diag 0,0,68\n" \
"1: lgr 3,2\n" \
" bctgr 3,0\n" /* one less reader */ \
" csg 2,3,0(%0)\n" \
" csg 2,3,0(%1)\n" \
" jl 0b" \
: : "a" (&(rw)->lock) : "2", "3", "cc", "memory" );
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \
: "2", "3", "cc" )
#define write_lock(rw) \
#define _raw_write_lock(rw) \
asm volatile(" llihh 3,0x8000\n" /* new lock value = 0x80...0 */ \
" j 1f\n" \
"0: # diag 0,0,68\n" \
"1: slgr 2,2\n" /* old lock value must be 0 */ \
" csg 2,3,0(%0)\n" \
" csg 2,3,0(%1)\n" \
" jl 0b" \
: : "a" (&(rw)->lock) : "2", "3", "cc", "memory" );
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \
: "2", "3", "cc" )
#define write_unlock(rw) \
#define _raw_write_unlock(rw) \
asm volatile(" slgr 3,3\n" /* new lock value = 0 */ \
" j 1f\n" \
"0: # diag 0,0,68\n" \
"1: llihh 2,0x8000\n" /* old lock value must be 0x8..0 */\
" csg 2,3,0(%0)\n" \
" csg 2,3,0(%1)\n" \
" jl 0b" \
: : "a" (&(rw)->lock) : "2", "3", "cc", "memory" );
: "+m" ((rw)->lock) : "a" (&(rw)->lock) \
: "2", "3", "cc" )
#endif /* __ASM_SPINLOCK_H */
#ifndef __ASM_S390X_SUSPEND_H
#define __ASM_S390X_SUSPEND_H
#endif
......@@ -12,18 +12,19 @@
#define __ASM_SYSTEM_H
#include <linux/config.h>
#include <asm/types.h>
#ifdef __KERNEL__
#include <asm/lowcore.h>
#endif
#include <linux/kernel.h>
#define prepare_to_switch() do { } while(0)
#define switch_to(prev,next,last) do { \
#define switch_to(prev,next) do { \
if (prev == next) \
break; \
save_fp_regs(&prev->thread.fp_regs); \
restore_fp_regs(&next->thread.fp_regs); \
last = resume(&prev->thread,&next->thread); \
resume(prev,next); \
} while (0)
struct task_struct;
......@@ -110,8 +111,6 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
: "+d" (x) : "a" (ptr)
: "memory", "cc", "0" );
break;
default:
abort();
}
return x;
}
......@@ -142,26 +141,26 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
/* interrupt control.. */
#define __sti() ({ \
unsigned long dummy; \
unsigned long __dummy; \
__asm__ __volatile__ ( \
"stosm %0,0x03" : "=m" (dummy) : : "memory"); \
"stosm 0(%0),0x03" : : "a" (&__dummy) : "memory"); \
})
#define __cli() ({ \
unsigned long flags; \
unsigned long __flags; \
__asm__ __volatile__ ( \
"stnsm %0,0xFC" : "=m" (flags) : : "memory"); \
flags; \
"stnsm 0(%0),0xFC" : : "a" (&__flags) : "memory"); \
__flags; \
})
#define __save_flags(x) \
__asm__ __volatile__("stosm %0,0" : "=m" (x) : : "memory")
__asm__ __volatile__("stosm 0(%0),0" : : "a" (&x) : "memory")
#define __restore_flags(x) \
__asm__ __volatile__("ssm %0" : : "m" (x) : "memory")
__asm__ __volatile__("ssm 0(%0)" : : "a" (&x) : "memory")
#define __load_psw(psw) \
__asm__ __volatile__("lpswe %0" : : "m" (psw) : "cc" );
__asm__ __volatile__("lpswe 0(%0)" : : "a" (&psw) : "cc" );
#define __ctl_load(array, low, high) ({ \
__asm__ __volatile__ ( \
......@@ -182,7 +181,7 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
})
#define __ctl_set_bit(cr, bit) ({ \
__u8 dummy[24]; \
__u8 __dummy[24]; \
__asm__ __volatile__ ( \
" la 1,%0\n" /* align to 8 byte */ \
" aghi 1,7\n" \
......@@ -195,12 +194,12 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
" ogr 0,%2\n" /* set the bit */ \
" stg 0,0(1)\n" \
"1: ex %1,6(2)" /* execute lctl */ \
: "=m" (dummy) : "a" (cr*17), "a" (1L<<(bit)) \
: "=m" (__dummy) : "a" (cr*17), "a" (1L<<(bit)) \
: "cc", "0", "1", "2"); \
})
#define __ctl_clear_bit(cr, bit) ({ \
__u8 dummy[24]; \
__u8 __dummy[24]; \
__asm__ __volatile__ ( \
" la 1,%0\n" /* align to 8 byte */ \
" aghi 1,7\n" \
......@@ -213,7 +212,7 @@ static inline unsigned long __xchg(unsigned long x, void * ptr, int size)
" ngr 0,%2\n" /* set the bit */ \
" stg 0,0(1)\n" \
"1: ex %1,6(2)" /* execute lctl */ \
: "=m" (dummy) : "a" (cr*17), "a" (~(1L<<(bit))) \
: "=m" (__dummy) : "a" (cr*17), "a" (~(1L<<(bit))) \
: "cc", "0", "1", "2"); \
})
......@@ -254,12 +253,17 @@ extern void smp_ctl_clear_bit(int cr, int bit);
#endif
#ifdef __KERNEL__
extern struct task_struct *resume(void *,void *);
extern struct task_struct *resume(void *, void *);
extern int save_fp_regs1(s390_fp_regs *fpregs);
extern void save_fp_regs(s390_fp_regs *fpregs);
extern int restore_fp_regs1(s390_fp_regs *fpregs);
extern void restore_fp_regs(s390_fp_regs *fpregs);
extern void (*_machine_restart)(char *command);
extern void (*_machine_halt)(void);
extern void (*_machine_power_off)(void);
#endif
#endif
......
/*************************************************************************
*
* tape390.h
* enables user programs to display messages on the tape device
*
* S390 and zSeries version
* Copyright (C) 2001 IBM Corporation
* Author(s): Despina Papadopoulou <despina_p@de.ibm.com>
*
*************************************************************************/
#ifndef _TAPE390_H
#define _TAPE390_H
#define TAPE390_DISPLAY _IOW('d', 1, struct display_struct)
/*
* The TAPE390_DISPLAY ioctl calls the Load Display command
* which transfers 17 bytes of data from the channel to the subsystem:
* - 1 format control byte, and
* - two 8-byte messages
*
* Format control byte:
* 0-2: New Message Overlay
* 3: Alternate Messages
* 4: Blink Message
* 5: Display Low/High Message
* 6: Reserved
* 7: Automatic Load Request
*
*/
/*
 * Payload of the TAPE390_DISPLAY ioctl (see comment above): one
 * format-control byte followed by two 8-byte messages -- 17 bytes
 * total; all-char members, so there is no padding to worry about.
 */
typedef struct display_struct {
char cntrl;
char message1[8];
char message2[8];
} display_struct;
#endif
/*
* include/asm-s390/thread_info.h
*
* S390 version
* Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
* Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com)
*/
#ifndef _ASM_THREAD_INFO_H
#define _ASM_THREAD_INFO_H
#ifdef __KERNEL__
#ifndef __ASSEMBLY__
#include <asm/processor.h>
/*
* low level task data that entry.S needs immediate access to
* - this struct should fit entirely inside of one cache line
* - this struct shares the supervisor stack pages
* - if the contents of this structure are changed, the assembly constants must also be changed
*/
/* Per-task low-level state; field order/offsets are mirrored by
 * assembly constants (see the warning in the comment above), so do
 * not reorder members without updating those. */
struct thread_info {
struct task_struct *task; /* main task structure */
struct exec_domain *exec_domain; /* execution domain */
unsigned long flags; /* low level flags */
unsigned int cpu; /* current CPU */
int preempt_count; /* 0 => preemptable, <0 => BUG */
};
#define PREEMPT_ACTIVE 0x4000000
/*
* macros/functions for gaining access to the thread information structure
*/
#define INIT_THREAD_INFO(tsk) \
{ \
task: &tsk, \
exec_domain: &default_exec_domain, \
flags: 0, \
cpu: 0, \
}
#define init_thread_info (init_thread_union.thread_info)
#define init_stack (init_thread_union.stack)
/* how to get the thread information struct from C */
/* how to get the thread information struct from C */
static inline struct thread_info *current_thread_info(void)
{
/*
 * NOTE(review): 0xd40 is presumably the lowcore slot holding the
 * current kernel stack pointer (verify against lowcore.h), and
 * 16384 is assumed to equal THREAD_SIZE (4*PAGE_SIZE below); the
 * thread_info lives at the base of the kernel stack.
 */
return (struct thread_info *)((*(unsigned long *) 0xd40)-16384);
}
/* thread information allocation */
#define alloc_thread_info() ((struct thread_info *) \
__get_free_pages(GFP_KERNEL,2))
#define free_thread_info(ti) free_pages((unsigned long) (ti), 2)
#define get_thread_info(ti) get_task_struct((ti)->task)
#define put_thread_info(ti) put_task_struct((ti)->task)
#endif
/*
* Size of kernel stack for each process
*/
#define THREAD_SIZE (4*PAGE_SIZE)
/*
* thread information flags bit numbers
*/
#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
#define TIF_NOTIFY_RESUME 1 /* resumption notification requested */
#define TIF_SIGPENDING 2 /* signal pending */
#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
#define TIF_USEDFPU 16 /* FPU was used by this task this quantum (SMP) */
#define TIF_POLLING_NRFLAG 17 /* true if poll_idle() is polling
TIF_NEED_RESCHED */
#define _TIF_SYSCALL_TRACE (1<<TIF_SYSCALL_TRACE)
#define _TIF_NOTIFY_RESUME (1<<TIF_NOTIFY_RESUME)
#define _TIF_SIGPENDING (1<<TIF_SIGPENDING)
#define _TIF_NEED_RESCHED (1<<TIF_NEED_RESCHED)
#define _TIF_USEDFPU (1<<TIF_USEDFPU)
#define _TIF_POLLING_NRFLAG (1<<TIF_POLLING_NRFLAG)
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
......@@ -17,13 +17,24 @@
(1000000/CLOCK_TICK_FACTOR) / (CLOCK_TICK_RATE/CLOCK_TICK_FACTOR)) \
<< (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
typedef unsigned long long cycles_t;
extern cycles_t cacheflush_time;
static inline cycles_t get_cycles(void)
{
return 0;
cycles_t cycles;
__asm__("stck 0(%0)" : : "a" (&(cycles)) : "memory", "cc");
return cycles >> 2;
}
static inline unsigned long long get_clock (void)
{
unsigned long long clock;
__asm__("stck 0(%0)" : : "a" (&(clock)) : "memory", "cc");
return clock;
}
#endif
#ifndef _S390X_TLB_H
#define _S390X_TLB_H
/*
* s390x doesn't need any special per-pte or
* per-vma handling..
*/
#define tlb_start_vma(tlb, vma) do { } while (0)
#define tlb_end_vma(tlb, vma) do { } while (0)
#define tlb_remove_tlb_entry(tlb, pte, address) do { } while (0)
/*
* .. because we flush the whole mm when it
* fills up.
*/
#define tlb_flush(tlb) flush_tlb_mm((tlb)->mm)
#include <asm-generic/tlb.h>
#endif
#ifndef _S390X_TLBFLUSH_H
#define _S390X_TLBFLUSH_H
#include <linux/config.h>
#include <linux/mm.h>
#include <asm/processor.h>
/*
* TLB flushing:
*
* - flush_tlb() flushes the current mm struct TLBs
* - flush_tlb_all() flushes all processes TLBs
* - flush_tlb_mm(mm) flushes the specified mm context TLB's
* - flush_tlb_page(vma, vmaddr) flushes one page
* - flush_tlb_range(vma, start, end) flushes a range of pages
* - flush_tlb_kernel_range(start, end) flushes a range of kernel pages
* - flush_tlb_pgtables(mm, start, end) flushes a range of page tables
*/
/*
* S/390 has three ways of flushing TLBs
* 'ptlb' does a flush of the local processor
* 'csp' flushes the TLBs on all PUs of a SMP
* 'ipte' invalidates a pte in a page table and flushes that out of
* the TLBs of all PUs of a SMP
*/
#define local_flush_tlb() \
do { __asm__ __volatile__("ptlb": : :"memory"); } while (0)
#ifndef CONFIG_SMP
/*
* We always need to flush, since s390 does not flush tlb
* on each context switch
*/
/* UP: always a full local flush -- s390 does not flush the TLB on
 * context switch (see comment above). */
static inline void flush_tlb(void)
{
local_flush_tlb();
}
static inline void flush_tlb_all(void)
{
local_flush_tlb();
}
/* UP: per-mm flushing is not narrowed; flush everything locally. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
local_flush_tlb();
}
/* UP: vma/addr are ignored -- a full local flush covers the page. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
local_flush_tlb();
}
/* UP: range arguments are ignored -- full local flush. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
local_flush_tlb();
}
/*
 * UP kernel-range flush: full local flush.  BUGFIX: dropped the
 * trailing semicolon from the macro body -- callers supply their own
 * ';', and the stray one expanded to a double statement that breaks
 * use inside unbraced if/else (the SMP variant below correctly has
 * none).
 */
#define flush_tlb_kernel_range(start, end) \
local_flush_tlb()
#else
#include <asm/smp.h>
extern void smp_ptlb_all(void);
/*
 * Flush the TLBs of all CPUs using 'csp' (compare and swap and purge
 * -- see the header comment: csp flushes the TLBs on all PUs).
 * 'la 4,1(%0)' forms dummy's address with the low bit set, and regs
 * 2/3 are the zeroed compare/swap pair; the asm clobbers 2, 3 and 4.
 */
static inline void global_flush_tlb(void)
{
long dummy = 0;
__asm__ __volatile__ (
"    la	4,1(%0)\n"
"    slr 2,2\n"
"    slr 3,3\n"
"    csp 2,4"
: : "a" (&dummy) : "cc", "2", "3", "4" );
}
/*
* We only have to do global flush of tlb if process run since last
* flush on any other pu than current.
* If we have threads (mm->count > 1) we always do a global flush,
* since the process runs on more than one processor at the same time.
*/
/*
 * Flush TLBs for 'mm'.  The expensive global (csp) flush is only done
 * when the mm may have run elsewhere since the last flush: several
 * CPUs present AND (the mm is shared between tasks, or its
 * cpu_vm_mask records a CPU other than this one).  The mask is then
 * narrowed to just the current CPU; otherwise a local flush suffices.
 */
static inline void __flush_tlb_mm(struct mm_struct * mm)
{
if ((smp_num_cpus > 1) &&
((atomic_read(&mm->mm_count) != 1) ||
(mm->cpu_vm_mask != (1UL << smp_processor_id())))) {
mm->cpu_vm_mask = (1UL << smp_processor_id());
global_flush_tlb();
} else {
local_flush_tlb();
}
}
/* SMP: flush the current task's mm via the cpu_vm_mask heuristic. */
static inline void flush_tlb(void)
{
__flush_tlb_mm(current->mm);
}
/* SMP: unconditional csp flush on all CPUs. */
static inline void flush_tlb_all(void)
{
global_flush_tlb();
}
/* SMP: delegate to the local-vs-global heuristic above. */
static inline void flush_tlb_mm(struct mm_struct *mm)
{
__flush_tlb_mm(mm);
}
/* SMP: addr is ignored -- the whole mm is flushed. */
static inline void flush_tlb_page(struct vm_area_struct *vma,
unsigned long addr)
{
__flush_tlb_mm(vma->vm_mm);
}
/* SMP: range is ignored -- the whole mm is flushed. */
static inline void flush_tlb_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end)
{
__flush_tlb_mm(vma->vm_mm);
}
#define flush_tlb_kernel_range(start, end) \
__flush_tlb_mm(&init_mm)
#endif
/* Intentional no-op, kept for the generic mm interface. */
static inline void flush_tlb_pgtables(struct mm_struct *mm,
unsigned long start, unsigned long end)
{
/* S/390 does not keep any page table caches in TLB */
}
#endif /* _S390X_TLBFLUSH_H */
......@@ -15,6 +15,7 @@
* User space memory access functions
*/
#include <linux/sched.h>
#include <linux/errno.h>
#define VERIFY_READ 0
#define VERIFY_WRITE 1
......@@ -35,9 +36,10 @@
#define USER_DS MAKE_MM_SEG(1)
#define get_ds() (KERNEL_DS)
#define get_fs() (current->addr_limit)
#define set_fs(x) ({asm volatile("sar 4,%0"::"a" ((x).ar4));\
current->addr_limit = (x);})
#define get_fs() ({ mm_segment_t __x; \
asm volatile("ear %0,4":"=a" (__x)); \
__x;})
#define set_fs(x) ({asm volatile("sar 4,%0"::"a" ((x).ar4));})
#define segment_eq(a,b) ((a).ar4 == (b).ar4)
......@@ -82,21 +84,21 @@ extern inline int __put_user_asm_8(__u64 x, void *ptr)
{
int err;
__asm__ __volatile__ ( " sr %1,%1\n"
" la 4,%0\n"
__asm__ __volatile__ ( " sr %0,%0\n"
" la 4,%1\n"
" sacf 512\n"
"0: stg %2,0(4)\n"
"1: sacf 0\n"
".section .fixup,\"ax\"\n"
"2: lhi %1,%h3\n"
"2: lhi %0,%h3\n"
" jg 1b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,2b\n"
".previous"
: "=m" (*((__u64*) ptr)) , "=&d" (err)
: "d" (x), "K" (-EFAULT)
: "=&d" (err)
: "m" (*((__u64*) ptr)), "d" (x), "K" (-EFAULT)
: "cc", "4" );
return err;
}
......@@ -104,21 +106,21 @@ extern inline int __put_user_asm_4(__u32 x, void *ptr)
{
int err;
__asm__ __volatile__ ( " sr %1,%1\n"
" la 4,%0\n"
__asm__ __volatile__ ( " sr %0,%0\n"
" la 4,%1\n"
" sacf 512\n"
"0: st %2,0(4)\n"
"1: sacf 0\n"
".section .fixup,\"ax\"\n"
"2: lhi %1,%h3\n"
"2: lhi %0,%h3\n"
" jg 1b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,2b\n"
".previous"
: "=m" (*((__u32*) ptr)) , "=&d" (err)
: "d" (x), "K" (-EFAULT)
: "=&d" (err)
: "m" (*((__u32*) ptr)), "d" (x), "K" (-EFAULT)
: "cc", "4" );
return err;
}
......@@ -127,21 +129,21 @@ extern inline int __put_user_asm_2(__u16 x, void *ptr)
{
int err;
__asm__ __volatile__ ( " sr %1,%1\n"
" la 4,%0\n"
__asm__ __volatile__ ( " sr %0,%0\n"
" la 4,%1\n"
" sacf 512\n"
"0: sth %2,0(4)\n"
"1: sacf 0\n"
".section .fixup,\"ax\"\n"
"2: lhi %1,%h3\n"
"2: lhi %0,%h3\n"
" jg 1b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,2b\n"
".previous"
: "=m" (*((__u16*) ptr)) , "=&d" (err)
: "d" (x), "K" (-EFAULT)
: "=&d" (err)
: "m" (*((__u16*) ptr)), "d" (x), "K" (-EFAULT)
: "cc", "4" );
return err;
}
......@@ -150,22 +152,22 @@ extern inline int __put_user_asm_1(__u8 x, void *ptr)
{
int err;
__asm__ __volatile__ ( " sr %1,%1\n"
" la 4,%0\n"
__asm__ __volatile__ ( " sr %0,%0\n"
" la 4,%1\n"
" sacf 512\n"
"0: stc %2,0(4)\n"
"1: sacf 0\n"
".section .fixup,\"ax\"\n"
"2: lhi %1,%h3\n"
"2: lhi %0,%h3\n"
" jg 1b\n"
".previous\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,2b\n"
".previous"
: "=m" (*((__u8*) ptr)) , "=&d" (err)
: "d" (x), "K" (-EFAULT)
: "cc", "1", "4" );
: "=&d" (err)
: "m" (*((__u8*) ptr)), "d" (x), "K" (-EFAULT)
: "cc", "4" );
return err;
}
......@@ -175,19 +177,25 @@ extern inline int __put_user_asm_1(__u8 x, void *ptr)
*/
#define __put_user(x, ptr) \
({ \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
__typeof__(*(ptr)) __x = (x); \
int __pu_err; \
switch (sizeof (*(ptr))) { \
switch (sizeof (*(__pu_addr))) { \
case 1: \
__pu_err = __put_user_asm_1((__u8)(__u64)(x),(ptr));\
__pu_err = __put_user_asm_1((__u8)(__u64)(__x), \
__pu_addr); \
break; \
case 2: \
__pu_err = __put_user_asm_2((__u16)(__u64)(x),(ptr));\
__pu_err = __put_user_asm_2((__u16)(__u64)(__x),\
__pu_addr); \
break; \
case 4: \
__pu_err = __put_user_asm_4((__u32)(__u64)(x),(ptr));\
__pu_err = __put_user_asm_4((__u32)(__u64)(__x),\
__pu_addr); \
break; \
case 8: \
__pu_err = __put_user_asm_8((__u64)(x),(ptr));\
__pu_err = __put_user_asm_8((__u64)(__x), \
__pu_addr); \
break; \
default: \
__pu_err = __put_user_bad(); \
......@@ -196,21 +204,10 @@ extern inline int __put_user_asm_1(__u8 x, void *ptr)
__pu_err; \
})
#define put_user(x, ptr) \
({ \
long __pu_err = -EFAULT; \
__typeof__(*(ptr)) *__pu_addr = (ptr); \
__typeof__(*(ptr)) __x = (x); \
if (__access_ok((long)__pu_addr,sizeof(*(ptr)))) { \
__pu_err = 0; \
__put_user((__x), (__pu_addr)); \
} \
__pu_err; \
})
#define put_user(x, ptr) __put_user(x, ptr)
extern int __put_user_bad(void);
#define __get_user_asm_8(x, ptr, err) \
({ \
__asm__ __volatile__ ( " sr %1,%1\n" \
......@@ -293,77 +290,44 @@ extern int __put_user_bad(void);
#define __get_user(x, ptr) \
({ \
__typeof__(ptr) __gu_addr = (ptr); \
__typeof__(*(ptr)) __x; \
int __gu_err; \
switch (sizeof(*(ptr))) { \
case 1: \
__get_user_asm_1(x,ptr,__gu_err); \
__get_user_asm_1(__x,__gu_addr,__gu_err); \
break; \
case 2: \
__get_user_asm_2(x,ptr,__gu_err); \
__get_user_asm_2(__x,__gu_addr,__gu_err); \
break; \
case 4: \
__get_user_asm_4(x,ptr,__gu_err); \
__get_user_asm_4(__x,__gu_addr,__gu_err); \
break; \
case 8: \
__get_user_asm_8(x,ptr,__gu_err); \
__get_user_asm_8(__x,__gu_addr,__gu_err); \
break; \
default: \
(x) = 0; \
__x = 0; \
__gu_err = __get_user_bad(); \
break; \
} \
__gu_err; \
})
#define get_user(x, ptr) \
({ \
long __gu_err = -EFAULT; \
__typeof__(ptr) __gu_addr = (ptr); \
__typeof__(*(ptr)) __x; \
if (__access_ok((long)__gu_addr,sizeof(*(ptr)))) { \
__gu_err = 0; \
__get_user((__x), (__gu_addr)); \
(x) = __x; \
} \
else \
(x) = 0; \
__gu_err; \
})
#define get_user(x, ptr) __get_user(x, ptr)
extern int __get_user_bad(void);
/*
* access register are set up, that 4 points to secondary (user) , 2 to primary (kernel)
*/
asmlinkage void __copy_from_user_fixup(void /* special calling convention */);
asmlinkage void __copy_to_user_fixup(void /* special calling convention */);
extern inline unsigned long
__copy_to_user_asm(void* to, const void* from, long n)
{
__asm__ __volatile__ ( " lgr 2,%2\n"
" lgr 4,%1\n"
" lgr 3,%0\n"
" lgr 5,3\n"
" sacf 512\n"
"0: mvcle 4,2,0\n"
" jo 0b\n"
" sacf 0\n"
" lgr %0,3\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,__copy_to_user_fixup\n"
".previous"
: "+&d" (n) : "d" (to), "d" (from)
: "cc", "1", "2", "3", "4", "5" );
return n;
}
extern long __copy_to_user_asm(const void *from, long n, void *to);
#define __copy_to_user(to, from, n) \
({ \
__copy_to_user_asm(to,from,n); \
__copy_to_user_asm(from, n, to); \
})
#define copy_to_user(to, from, n) \
......@@ -371,38 +335,18 @@ __copy_to_user_asm(void* to, const void* from, long n)
long err = 0; \
__typeof__(n) __n = (n); \
if (__access_ok(to,__n)) { \
err = __copy_to_user_asm(to,from,__n); \
err = __copy_to_user_asm(from, __n, to); \
} \
else \
err = __n; \
err; \
})
extern inline unsigned long
__copy_from_user_asm(void* to, const void* from, long n)
{
__asm__ __volatile__ ( " lgr 2,%1\n"
" lgr 4,%2\n"
" lgr 3,%0\n"
" lgr 5,3\n"
" sacf 512\n"
"0: mvcle 2,4,0\n"
" jo 0b\n"
" sacf 0\n"
" lgr %0,5\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,__copy_from_user_fixup\n"
".previous"
: "+&d" (n) : "d" (to), "d" (from)
: "cc", "1", "2", "3", "4", "5" );
return n;
}
extern long __copy_from_user_asm(void *to, long n, const void *from);
#define __copy_from_user(to, from, n) \
({ \
__copy_from_user_asm(to,from,n); \
__copy_from_user_asm(to, n, from); \
})
#define copy_from_user(to, from, n) \
......@@ -410,7 +354,7 @@ __copy_from_user_asm(void* to, const void* from, long n)
long err = 0; \
__typeof__(n) __n = (n); \
if (__access_ok(from,__n)) { \
err = __copy_from_user_asm(to,from,__n); \
err = __copy_from_user_asm(to, __n, from); \
} \
else \
err = __n; \
......@@ -520,27 +464,12 @@ strnlen_user(const char * src, unsigned long n)
* Zero Userspace
*/
static inline unsigned long
__clear_user(void *to, unsigned long n)
{
__asm__ __volatile__ ( " sacf 512\n"
" lgr 4,%1\n"
" lgr 5,%0\n"
" sgr 2,2\n"
" sgr 3,3\n"
"0: mvcle 4,2,0\n"
" jo 0b\n"
"1: sacf 0\n"
" lgr %0,5\n"
".section __ex_table,\"a\"\n"
" .align 8\n"
" .quad 0b,__copy_to_user_fixup\n"
".previous"
: "+&a" (n)
: "a" (to)
: "cc", "1", "2", "3", "4", "5" );
return n;
}
extern long __clear_user_asm(void *to, long n);
#define __clear_user(to, n) \
({ \
__clear_user_asm(to, n); \
})
static inline unsigned long
clear_user(void *to, unsigned long n)
......
......@@ -180,9 +180,24 @@
#define __NR_pivot_root 217
#define __NR_mincore 218
#define __NR_madvise 219
#define __NR_getdents64 220
#define __NR_gettid 226
#define __NR_tkill 227
#define __NR_readahead 222
#define __NR_setxattr 224
#define __NR_lsetxattr 225
#define __NR_fsetxattr 226
#define __NR_getxattr 227
#define __NR_lgetxattr 228
#define __NR_fgetxattr 229
#define __NR_listxattr 230
#define __NR_llistxattr 231
#define __NR_flistxattr 232
#define __NR_removexattr 233
#define __NR_lremovexattr 234
#define __NR_fremovexattr 235
#define __NR_gettid 236
#define __NR_tkill 237
#define __NR_futex 238
#define __NR_sched_setaffinity 239
#define __NR_sched_getaffinity 240
/* user-visible error numbers are in the range -1 - -122: see <asm-s390/errno.h> */
......@@ -305,6 +320,8 @@ type name(type1 arg1, type2 arg2, type3 arg3, type4 arg4, \
#ifdef __KERNEL_SYSCALLS__
#include <asm/stat.h>
/*
* we need this inline - forking from kernel space will result
* in NO COPY ON WRITE (!!!), until an execve is executed. This
......@@ -333,7 +350,9 @@ static inline _syscall1(int,_exit,int,exitcode)
static inline _syscall1(int,delete_module,const char *,name)
static inline _syscall2(long,stat,char *,filename,struct stat *,statbuf)
static inline pid_t waitpid(int pid, int * wait_stat, int flags)
struct rusage;
extern long sys_wait4(pid_t, unsigned int *, int, struct rusage *);
static inline pid_t waitpid(int pid, int *wait_stat, int flags)
{
return sys_wait4(pid, wait_stat, flags, NULL);
}
......
......@@ -18,7 +18,6 @@
#include <asm/dasd.h>
#endif
#define DASD_API_VERSION 0
#define LINE_LENGTH 80
#define VTOC_START_CC 0x0
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment