Commit 7c6d4dca authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha

Pull alpha architecture fixes from Matt Turner:
 "This contains mostly clean ups and fixes but also an implementation of
  atomic64_dec_if_positive() and a pair of new syscalls"

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/mattst88/alpha:
  alpha: Use handle_percpu_irq for the timer interrupt
  alpha: Force the user-visible HZ to a constant 1024.
  alpha: Don't if-out dp264_device_interrupt.
  alpha: Use __builtin_alpha_rpcc
  alpha: Fix type compatibility warning for marvel_map_irq
  alpha: Generate dwarf2 unwind info for various kernel entry points.
  alpha: Implement atomic64_dec_if_positive
  alpha: Improve atomic_add_unless
  alpha: Modernize lib/mpi/longlong.h
  alpha: Add kcmp and finit_module syscalls
  alpha: locks: remove unused arch_*_relax operations
  alpha: kernel: typo issue, using '1' instead of '11'
  alpha: kernel: using memcpy() instead of strcpy()
  alpha: Convert print_symbol to %pSR
parents a030cbc3 dff64649
@@ -15,6 +15,7 @@ config ALPHA
 	select ARCH_WANT_OPTIONAL_GPIOLIB
 	select ARCH_WANT_IPC_PARSE_VERSION
 	select ARCH_HAVE_NMI_SAFE_CMPXCHG
+	select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
 	select GENERIC_SMP_IDLE_THREAD
 	select GENERIC_CMOS_UPDATE
 	select GENERIC_STRNCPY_FROM_USER
...
@@ -186,17 +186,24 @@ static __inline__ long atomic64_sub_return(long i, atomic64_t * v)
  */
 static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
 {
-	int c, old;
-	c = atomic_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c;
+	int c, new, old;
+	smp_mb();
+	__asm__ __volatile__(
+	"1: ldl_l %[old],%[mem]\n"
+	"   cmpeq %[old],%[u],%[c]\n"
+	"   addl %[old],%[a],%[new]\n"
+	"   bne %[c],2f\n"
+	"   stl_c %[new],%[mem]\n"
+	"   beq %[new],3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3: br 1b\n"
+	".previous"
+	: [old] "=&r"(old), [new] "=&r"(new), [c] "=&r"(c)
+	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"((long)u)
+	: "memory");
+	smp_mb();
+	return old;
 }
@@ -207,21 +214,56 @@ static __inline__ int __atomic_add_unless(atomic_t *v, int a, int u)
  * @u: ...unless v is equal to u.
  *
  * Atomically adds @a to @v, so long as it was not @u.
- * Returns the old value of @v.
+ * Returns true iff @v was not @u.
  */
 static __inline__ int atomic64_add_unless(atomic64_t *v, long a, long u)
 {
-	long c, old;
-	c = atomic64_read(v);
-	for (;;) {
-		if (unlikely(c == (u)))
-			break;
-		old = atomic64_cmpxchg((v), c, c + (a));
-		if (likely(old == c))
-			break;
-		c = old;
-	}
-	return c != (u);
+	long c, tmp;
+	smp_mb();
+	__asm__ __volatile__(
+	"1: ldq_l %[tmp],%[mem]\n"
+	"   cmpeq %[tmp],%[u],%[c]\n"
+	"   addq %[tmp],%[a],%[tmp]\n"
+	"   bne %[c],2f\n"
+	"   stq_c %[tmp],%[mem]\n"
+	"   beq %[tmp],3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3: br 1b\n"
+	".previous"
+	: [tmp] "=&r"(tmp), [c] "=&r"(c)
+	: [mem] "m"(*v), [a] "rI"(a), [u] "rI"(u)
+	: "memory");
+	smp_mb();
+	return !c;
+}
+
+/*
+ * atomic64_dec_if_positive - decrement by 1 if old value positive
+ * @v: pointer of type atomic_t
+ *
+ * The function returns the old value of *v minus 1, even if
+ * the atomic variable, v, was not decremented.
+ */
+static inline long atomic64_dec_if_positive(atomic64_t *v)
+{
+	long old, tmp;
+	smp_mb();
+	__asm__ __volatile__(
+	"1: ldq_l %[old],%[mem]\n"
+	"   subq %[old],1,%[tmp]\n"
+	"   ble %[old],2f\n"
+	"   stq_c %[tmp],%[mem]\n"
+	"   beq %[tmp],3f\n"
+	"2:\n"
+	".subsection 2\n"
+	"3: br 1b\n"
+	".previous"
+	: [old] "=&r"(old), [tmp] "=&r"(tmp)
+	: [mem] "m"(*v)
+	: "memory");
+	smp_mb();
+	return old - 1;
 }
 
 #define atomic64_inc_not_zero(v) atomic64_add_unless((v), 1, 0)
...
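The two ll/sc sequences above implement exactly what the kernel-doc comments promise: add-unless returns true iff the value was not @u, and dec-if-positive always returns the old value minus one. For readers who do not speak Alpha assembly, the following is a rough, userspace-compilable C sketch of the same logic built on a compare-and-swap loop. The helper names and the use of GCC's __sync_val_compare_and_swap are illustrative stand-ins, not the kernel's implementation; the patch's ldq_l/stq_c version simply retries the store directly when the reservation is lost (the beq/br 1b path) instead of round-tripping through cmpxchg.

```c
/* Illustrative, userspace-compilable sketch (not the kernel's code). */
#include <stdbool.h>
#include <stdio.h>

typedef struct { long counter; } atomic64_t;

static long cmpxchg64(atomic64_t *v, long old, long new_val)
{
	/* stands in for atomic64_cmpxchg() */
	return __sync_val_compare_and_swap(&v->counter, old, new_val);
}

/* Add @a to @v unless it equals @u; true iff something was added. */
static bool add_unless(atomic64_t *v, long a, long u)
{
	long c = v->counter;
	while (c != u) {
		long old = cmpxchg64(v, c, c + a);
		if (old == c)
			return true;	/* our store won the race */
		c = old;		/* lost the race, retry */
	}
	return false;			/* value was @u, nothing added */
}

/* Decrement only if the old value was positive; always return old - 1. */
static long dec_if_positive(atomic64_t *v)
{
	long c = v->counter;
	for (;;) {
		if (c <= 0)
			return c - 1;	/* not decremented */
		long old = cmpxchg64(v, c, c - 1);
		if (old == c)
			return c - 1;
		c = old;
	}
}

int main(void)
{
	atomic64_t refs = { 2 };
	printf("grabbed: %d\n", add_unless(&refs, 1, 0));          /* prints 1 */
	printf("dec_if_positive: %ld\n", dec_if_positive(&refs));  /* prints 2 */
	printf("dec_if_positive: %ld\n", dec_if_positive(&refs));  /* prints 1 */
	return 0;
}
```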
@@ -3,7 +3,9 @@
 #include <uapi/asm/param.h>
 
-#define HZ CONFIG_HZ
-#define USER_HZ HZ
-# define CLOCKS_PER_SEC HZ /* frequency at which times() counts */
+# undef HZ
+# define HZ CONFIG_HZ
+# define USER_HZ 1024
+# define CLOCKS_PER_SEC USER_HZ /* frequency at which times() counts */
 
 #endif /* _ASM_ALPHA_PARAM_H */
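The effect of this hunk is to decouple the kernel's internal tick rate (HZ, now CONFIG_HZ) from the tick unit reported to userspace (USER_HZ, pinned at the 1024 that Alpha binaries have always expected). Values exported in clock ticks, for example by times(), therefore have to be rescaled from jiffies. A minimal sketch of that scaling is below; the constants and function name are illustrative, not the kernel's actual jiffies_to_clock_t().

```c
/* Illustrative only: rescale kernel jiffies (KERNEL_HZ per second) to
 * the USER_HZ ticks that Alpha userspace expects from times() etc. */
#define KERNEL_HZ 250ULL     /* stands in for CONFIG_HZ */
#define USER_HZ   1024ULL    /* fixed user-visible rate on Alpha */

static unsigned long jiffies_to_user_ticks(unsigned long long jiffies)
{
	/* 64-bit intermediate keeps the multiply from overflowing */
	return (unsigned long)(jiffies * USER_HZ / KERNEL_HZ);
}
```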
@@ -168,8 +168,4 @@ static inline void arch_write_unlock(arch_rwlock_t * lock)
 #define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
 #define arch_write_lock_flags(lock, flags) arch_write_lock(lock)
 
-#define arch_spin_relax(lock) cpu_relax()
-#define arch_read_relax(lock) cpu_relax()
-#define arch_write_relax(lock) cpu_relax()
-
 #endif /* _ALPHA_SPINLOCK_H */
@@ -3,8 +3,7 @@
 #include <uapi/asm/unistd.h>
 
-#define NR_SYSCALLS 506
+#define NR_SYSCALLS 508
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
...
 #ifndef _UAPI_ASM_ALPHA_PARAM_H
 #define _UAPI_ASM_ALPHA_PARAM_H
 
-/* ??? Gross. I don't want to parameterize this, and supposedly the
-   hardware ignores reprogramming. We also need userland buy-in to the
-   change in HZ, since this is visible in the wait4 resources etc. */
-
-#ifndef __KERNEL__
 #define HZ 1024
-#endif
 
 #define EXEC_PAGESIZE 8192
@@ -17,5 +11,4 @@
 #define MAXHOSTNAMELEN 64 /* max length of hostname */
 
 #endif /* _UAPI_ASM_ALPHA_PARAM_H */
@@ -467,5 +467,7 @@
 #define __NR_sendmmsg			503
 #define __NR_process_vm_readv		504
 #define __NR_process_vm_writev		505
+#define __NR_kcmp			506
+#define __NR_finit_module		507
 
 #endif /* _UAPI_ALPHA_UNISTD_H */
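Together with the NR_SYSCALLS bump above and the two new sys_call_table entries in the systbls.S hunk further down, these numbers make kcmp(2) and finit_module(2) reachable on Alpha. On a libc without wrappers they can be exercised through syscall(2); the sketch below is a hypothetical test program, with the numbers valid only for Alpha per this series and the module path purely illustrative.

```c
/* Hypothetical userspace test: call the two new alpha syscalls by number. */
#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/syscall.h>

#ifndef __NR_kcmp
#define __NR_kcmp 506            /* alpha-specific number added here */
#endif
#ifndef __NR_finit_module
#define __NR_finit_module 507    /* alpha-specific number added here */
#endif

int main(void)
{
	pid_t me = getpid();

	/* kcmp(pid1, pid2, type, idx1, idx2); type 0 is KCMP_FILE in
	   <linux/kcmp.h>: compare fd 0 of both processes. */
	long r = syscall(__NR_kcmp, me, me, 0, 0, 0);
	printf("kcmp -> %ld\n", r);

	/* finit_module(fd, param_values, flags) loads a module from an
	   already-open file descriptor (needs CAP_SYS_MODULE). */
	int fd = open("/lib/modules/dummy.ko", O_RDONLY); /* illustrative path */
	if (fd >= 0) {
		r = syscall(__NR_finit_module, fd, "", 0);
		printf("finit_module -> %ld\n", r);
	}
	return 0;
}
```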
@@ -12,11 +12,32 @@
 	.text
 	.set noat
+	.cfi_sections .debug_frame
 
 /* Stack offsets. */
 #define SP_OFF			184
 #define SWITCH_STACK_SIZE	320
 
+.macro CFI_START_OSF_FRAME func
+	.align 4
+	.globl \func
+	.type \func,@function
+\func:
+	.cfi_startproc simple
+	.cfi_return_column 64
+	.cfi_def_cfa $sp, 48
+	.cfi_rel_offset 64, 8
+	.cfi_rel_offset $gp, 16
+	.cfi_rel_offset $16, 24
+	.cfi_rel_offset $17, 32
+	.cfi_rel_offset $18, 40
+.endm
+
+.macro CFI_END_OSF_FRAME func
+	.cfi_endproc
+	.size \func, . - \func
+.endm
+
 /*
  * This defines the normal kernel pt-regs layout.
  *
@@ -27,100 +48,158 @@
  * the palcode-provided values are available to the signal handler.
  */
-#define SAVE_ALL \
-	subq $sp, SP_OFF, $sp; \
-	stq $0, 0($sp); \
-	stq $1, 8($sp); \
-	stq $2, 16($sp); \
-	stq $3, 24($sp); \
-	stq $4, 32($sp); \
-	stq $28, 144($sp); \
-	lda $2, alpha_mv; \
-	stq $5, 40($sp); \
-	stq $6, 48($sp); \
-	stq $7, 56($sp); \
-	stq $8, 64($sp); \
-	stq $19, 72($sp); \
-	stq $20, 80($sp); \
-	stq $21, 88($sp); \
-	ldq $2, HAE_CACHE($2); \
-	stq $22, 96($sp); \
-	stq $23, 104($sp); \
-	stq $24, 112($sp); \
-	stq $25, 120($sp); \
-	stq $26, 128($sp); \
-	stq $27, 136($sp); \
-	stq $2, 152($sp); \
-	stq $16, 160($sp); \
-	stq $17, 168($sp); \
+.macro SAVE_ALL
+	subq $sp, SP_OFF, $sp
+	.cfi_adjust_cfa_offset SP_OFF
+	stq $0, 0($sp)
+	stq $1, 8($sp)
+	stq $2, 16($sp)
+	stq $3, 24($sp)
+	stq $4, 32($sp)
+	stq $28, 144($sp)
+	.cfi_rel_offset $0, 0
+	.cfi_rel_offset $1, 8
+	.cfi_rel_offset $2, 16
+	.cfi_rel_offset $3, 24
+	.cfi_rel_offset $4, 32
+	.cfi_rel_offset $28, 144
+	lda $2, alpha_mv
+	stq $5, 40($sp)
+	stq $6, 48($sp)
+	stq $7, 56($sp)
+	stq $8, 64($sp)
+	stq $19, 72($sp)
+	stq $20, 80($sp)
+	stq $21, 88($sp)
+	ldq $2, HAE_CACHE($2)
+	stq $22, 96($sp)
+	stq $23, 104($sp)
+	stq $24, 112($sp)
+	stq $25, 120($sp)
+	stq $26, 128($sp)
+	stq $27, 136($sp)
+	stq $2, 152($sp)
+	stq $16, 160($sp)
+	stq $17, 168($sp)
 	stq $18, 176($sp)
+	.cfi_rel_offset $5, 40
+	.cfi_rel_offset $6, 48
+	.cfi_rel_offset $7, 56
+	.cfi_rel_offset $8, 64
+	.cfi_rel_offset $19, 72
+	.cfi_rel_offset $20, 80
+	.cfi_rel_offset $21, 88
+	.cfi_rel_offset $22, 96
+	.cfi_rel_offset $23, 104
+	.cfi_rel_offset $24, 112
+	.cfi_rel_offset $25, 120
+	.cfi_rel_offset $26, 128
+	.cfi_rel_offset $27, 136
+.endm
-#define RESTORE_ALL \
-	lda $19, alpha_mv; \
-	ldq $0, 0($sp); \
-	ldq $1, 8($sp); \
-	ldq $2, 16($sp); \
-	ldq $3, 24($sp); \
-	ldq $21, 152($sp); \
-	ldq $20, HAE_CACHE($19); \
-	ldq $4, 32($sp); \
-	ldq $5, 40($sp); \
-	ldq $6, 48($sp); \
-	ldq $7, 56($sp); \
-	subq $20, $21, $20; \
-	ldq $8, 64($sp); \
-	beq $20, 99f; \
-	ldq $20, HAE_REG($19); \
-	stq $21, HAE_CACHE($19); \
-	stq $21, 0($20); \
-99:; \
-	ldq $19, 72($sp); \
-	ldq $20, 80($sp); \
-	ldq $21, 88($sp); \
-	ldq $22, 96($sp); \
-	ldq $23, 104($sp); \
-	ldq $24, 112($sp); \
-	ldq $25, 120($sp); \
-	ldq $26, 128($sp); \
-	ldq $27, 136($sp); \
-	ldq $28, 144($sp); \
+.macro RESTORE_ALL
+	lda $19, alpha_mv
+	ldq $0, 0($sp)
+	ldq $1, 8($sp)
+	ldq $2, 16($sp)
+	ldq $3, 24($sp)
+	ldq $21, 152($sp)
+	ldq $20, HAE_CACHE($19)
+	ldq $4, 32($sp)
+	ldq $5, 40($sp)
+	ldq $6, 48($sp)
+	ldq $7, 56($sp)
+	subq $20, $21, $20
+	ldq $8, 64($sp)
+	beq $20, 99f
+	ldq $20, HAE_REG($19)
+	stq $21, HAE_CACHE($19)
+	stq $21, 0($20)
+99:	ldq $19, 72($sp)
+	ldq $20, 80($sp)
+	ldq $21, 88($sp)
+	ldq $22, 96($sp)
+	ldq $23, 104($sp)
+	ldq $24, 112($sp)
+	ldq $25, 120($sp)
+	ldq $26, 128($sp)
+	ldq $27, 136($sp)
+	ldq $28, 144($sp)
 	addq $sp, SP_OFF, $sp
+	.cfi_restore $0
+	.cfi_restore $1
+	.cfi_restore $2
+	.cfi_restore $3
+	.cfi_restore $4
+	.cfi_restore $5
+	.cfi_restore $6
+	.cfi_restore $7
+	.cfi_restore $8
+	.cfi_restore $19
+	.cfi_restore $20
+	.cfi_restore $21
+	.cfi_restore $22
+	.cfi_restore $23
+	.cfi_restore $24
+	.cfi_restore $25
+	.cfi_restore $26
+	.cfi_restore $27
+	.cfi_restore $28
+	.cfi_adjust_cfa_offset -SP_OFF
+.endm
+
+.macro DO_SWITCH_STACK
+	bsr $1, do_switch_stack
+	.cfi_adjust_cfa_offset SWITCH_STACK_SIZE
+	.cfi_rel_offset $9, 0
+	.cfi_rel_offset $10, 8
+	.cfi_rel_offset $11, 16
+	.cfi_rel_offset $12, 24
+	.cfi_rel_offset $13, 32
+	.cfi_rel_offset $14, 40
+	.cfi_rel_offset $15, 48
+	/* We don't really care about the FP registers for debugging. */
+.endm
+
+.macro UNDO_SWITCH_STACK
+	bsr $1, undo_switch_stack
+	.cfi_restore $9
+	.cfi_restore $10
+	.cfi_restore $11
+	.cfi_restore $12
+	.cfi_restore $13
+	.cfi_restore $14
+	.cfi_restore $15
+	.cfi_adjust_cfa_offset -SWITCH_STACK_SIZE
+.endm
 /*
  * Non-syscall kernel entry points.
  */
 
-	.align 4
-	.globl entInt
-	.ent entInt
-entInt:
+CFI_START_OSF_FRAME entInt
 	SAVE_ALL
 	lda $8, 0x3fff
 	lda $26, ret_from_sys_call
 	bic $sp, $8, $8
 	mov $sp, $19
 	jsr $31, do_entInt
-.end entInt
+CFI_END_OSF_FRAME entInt
 
-	.align 4
-	.globl entArith
-	.ent entArith
-entArith:
+CFI_START_OSF_FRAME entArith
 	SAVE_ALL
 	lda $8, 0x3fff
 	lda $26, ret_from_sys_call
 	bic $sp, $8, $8
 	mov $sp, $18
 	jsr $31, do_entArith
-.end entArith
+CFI_END_OSF_FRAME entArith
-	.align 4
-	.globl entMM
-	.ent entMM
-entMM:
+CFI_START_OSF_FRAME entMM
 	SAVE_ALL
 /* save $9 - $15 so the inline exception code can manipulate them. */
 	subq $sp, 56, $sp
+	.cfi_adjust_cfa_offset 56
 	stq $9, 0($sp)
 	stq $10, 8($sp)
 	stq $11, 16($sp)
@@ -128,6 +207,13 @@ entMM:
 	stq $13, 32($sp)
 	stq $14, 40($sp)
 	stq $15, 48($sp)
+	.cfi_rel_offset $9, 0
+	.cfi_rel_offset $10, 8
+	.cfi_rel_offset $11, 16
+	.cfi_rel_offset $12, 24
+	.cfi_rel_offset $13, 32
+	.cfi_rel_offset $14, 40
+	.cfi_rel_offset $15, 48
 	addq $sp, 56, $19
 /* handle the fault */
 	lda $8, 0x3fff
@@ -142,28 +228,33 @@ entMM:
 	ldq $14, 40($sp)
 	ldq $15, 48($sp)
 	addq $sp, 56, $sp
+	.cfi_restore $9
+	.cfi_restore $10
+	.cfi_restore $11
+	.cfi_restore $12
+	.cfi_restore $13
+	.cfi_restore $14
+	.cfi_restore $15
+	.cfi_adjust_cfa_offset -56
 /* finish up the syscall as normal. */
 	br ret_from_sys_call
-.end entMM
+CFI_END_OSF_FRAME entMM
-	.align 4
-	.globl entIF
-	.ent entIF
-entIF:
+CFI_START_OSF_FRAME entIF
 	SAVE_ALL
 	lda $8, 0x3fff
 	lda $26, ret_from_sys_call
 	bic $sp, $8, $8
 	mov $sp, $17
 	jsr $31, do_entIF
-.end entIF
+CFI_END_OSF_FRAME entIF
-	.align 4
-	.globl entUna
-	.ent entUna
-entUna:
+CFI_START_OSF_FRAME entUna
 	lda $sp, -256($sp)
+	.cfi_adjust_cfa_offset 256
 	stq $0, 0($sp)
+	.cfi_rel_offset $0, 0
+	.cfi_remember_state
 	ldq $0, 256($sp) /* get PS */
 	stq $1, 8($sp)
 	stq $2, 16($sp)
@@ -195,6 +286,32 @@ entUna:
 	stq $28, 224($sp)
 	mov $sp, $19
 	stq $gp, 232($sp)
+	.cfi_rel_offset $1, 1*8
+	.cfi_rel_offset $2, 2*8
+	.cfi_rel_offset $3, 3*8
+	.cfi_rel_offset $4, 4*8
+	.cfi_rel_offset $5, 5*8
+	.cfi_rel_offset $6, 6*8
+	.cfi_rel_offset $7, 7*8
+	.cfi_rel_offset $8, 8*8
+	.cfi_rel_offset $9, 9*8
+	.cfi_rel_offset $10, 10*8
+	.cfi_rel_offset $11, 11*8
+	.cfi_rel_offset $12, 12*8
+	.cfi_rel_offset $13, 13*8
+	.cfi_rel_offset $14, 14*8
+	.cfi_rel_offset $15, 15*8
+	.cfi_rel_offset $19, 19*8
+	.cfi_rel_offset $20, 20*8
+	.cfi_rel_offset $21, 21*8
+	.cfi_rel_offset $22, 22*8
+	.cfi_rel_offset $23, 23*8
+	.cfi_rel_offset $24, 24*8
+	.cfi_rel_offset $25, 25*8
+	.cfi_rel_offset $26, 26*8
+	.cfi_rel_offset $27, 27*8
+	.cfi_rel_offset $28, 28*8
+	.cfi_rel_offset $29, 29*8
 	lda $8, 0x3fff
 	stq $31, 248($sp)
 	bic $sp, $8, $8
@@ -228,16 +345,45 @@ entUna:
 	ldq $28, 224($sp)
 	ldq $gp, 232($sp)
 	lda $sp, 256($sp)
+	.cfi_restore $1
+	.cfi_restore $2
+	.cfi_restore $3
+	.cfi_restore $4
+	.cfi_restore $5
+	.cfi_restore $6
+	.cfi_restore $7
+	.cfi_restore $8
+	.cfi_restore $9
+	.cfi_restore $10
+	.cfi_restore $11
+	.cfi_restore $12
+	.cfi_restore $13
+	.cfi_restore $14
+	.cfi_restore $15
+	.cfi_restore $19
+	.cfi_restore $20
+	.cfi_restore $21
+	.cfi_restore $22
+	.cfi_restore $23
+	.cfi_restore $24
+	.cfi_restore $25
+	.cfi_restore $26
+	.cfi_restore $27
+	.cfi_restore $28
+	.cfi_restore $29
+	.cfi_adjust_cfa_offset -256
 	call_pal PAL_rti
-.end entUna
 	.align 4
-	.ent entUnaUser
 entUnaUser:
+	.cfi_restore_state
 	ldq $0, 0($sp) /* restore original $0 */
 	lda $sp, 256($sp) /* pop entUna's stack frame */
+	.cfi_restore $0
+	.cfi_adjust_cfa_offset -256
 	SAVE_ALL /* setup normal kernel stack */
 	lda $sp, -56($sp)
+	.cfi_adjust_cfa_offset 56
 	stq $9, 0($sp)
 	stq $10, 8($sp)
 	stq $11, 16($sp)
@@ -245,6 +391,13 @@ entUnaUser:
 	stq $13, 32($sp)
 	stq $14, 40($sp)
 	stq $15, 48($sp)
+	.cfi_rel_offset $9, 0
+	.cfi_rel_offset $10, 8
+	.cfi_rel_offset $11, 16
+	.cfi_rel_offset $12, 24
+	.cfi_rel_offset $13, 32
+	.cfi_rel_offset $14, 40
+	.cfi_rel_offset $15, 48
 	lda $8, 0x3fff
 	addq $sp, 56, $19
 	bic $sp, $8, $8
@@ -257,20 +410,25 @@ entUnaUser:
 	ldq $14, 40($sp)
 	ldq $15, 48($sp)
 	lda $sp, 56($sp)
+	.cfi_restore $9
+	.cfi_restore $10
+	.cfi_restore $11
+	.cfi_restore $12
+	.cfi_restore $13
+	.cfi_restore $14
+	.cfi_restore $15
+	.cfi_adjust_cfa_offset -56
 	br ret_from_sys_call
-.end entUnaUser
+CFI_END_OSF_FRAME entUna
-	.align 4
-	.globl entDbg
-	.ent entDbg
-entDbg:
+CFI_START_OSF_FRAME entDbg
 	SAVE_ALL
 	lda $8, 0x3fff
 	lda $26, ret_from_sys_call
 	bic $sp, $8, $8
 	mov $sp, $16
 	jsr $31, do_entDbg
-.end entDbg
+CFI_END_OSF_FRAME entDbg
 
 /*
  * The system call entry point is special. Most importantly, it looks
@@ -285,8 +443,12 @@ entDbg:
 	.align 4
 	.globl entSys
-	.globl ret_from_sys_call
-	.ent entSys
+	.type entSys, @function
+	.cfi_startproc simple
+	.cfi_return_column 64
+	.cfi_def_cfa $sp, 48
+	.cfi_rel_offset 64, 8
+	.cfi_rel_offset $gp, 16
 entSys:
 	SAVE_ALL
 	lda $8, 0x3fff
@@ -300,6 +462,9 @@ entSys:
 	stq $17, SP_OFF+32($sp)
 	s8addq $0, $5, $5
 	stq $18, SP_OFF+40($sp)
+	.cfi_rel_offset $16, SP_OFF+24
+	.cfi_rel_offset $17, SP_OFF+32
+	.cfi_rel_offset $18, SP_OFF+40
 	blbs $3, strace
 	beq $4, 1f
 	ldq $27, 0($5)
@@ -310,6 +475,7 @@ entSys:
 	stq $31, 72($sp) /* a3=0 => no error */
 	.align 4
+	.globl ret_from_sys_call
 ret_from_sys_call:
 	cmovne $26, 0, $18 /* $18 = 0 => non-restartable */
 	ldq $0, SP_OFF($sp)
@@ -324,10 +490,12 @@ ret_to_user:
 	and $17, _TIF_WORK_MASK, $2
 	bne $2, work_pending
 restore_all:
+	.cfi_remember_state
 	RESTORE_ALL
 	call_pal PAL_rti
 
 ret_to_kernel:
+	.cfi_restore_state
 	lda $16, 7
 	call_pal PAL_swpipl
 	br restore_all
@@ -356,7 +524,6 @@ $ret_success:
 	stq $0, 0($sp)
 	stq $31, 72($sp) /* a3=0 => no error */
 	br ret_from_sys_call
-.end entSys
 /*
  * Do all cleanup when returning from all interrupts and system calls.
@@ -370,7 +537,7 @@ $ret_success:
  */
 	.align 4
-	.ent work_pending
+	.type work_pending, @function
 work_pending:
 	and $17, _TIF_NOTIFY_RESUME | _TIF_SIGPENDING, $2
 	bne $2, $work_notifysig
@@ -387,23 +554,22 @@ $work_resched:
 
 $work_notifysig:
 	mov $sp, $16
-	bsr $1, do_switch_stack
+	DO_SWITCH_STACK
 	jsr $26, do_work_pending
-	bsr $1, undo_switch_stack
+	UNDO_SWITCH_STACK
 	br restore_all
-.end work_pending
 /*
  * PTRACE syscall handler
  */
 	.align 4
-	.ent strace
+	.type strace, @function
 strace:
 	/* set up signal stack, call syscall_trace */
-	bsr $1, do_switch_stack
+	DO_SWITCH_STACK
 	jsr $26, syscall_trace_enter /* returns the syscall number */
-	bsr $1, undo_switch_stack
+	UNDO_SWITCH_STACK
 	/* get the arguments back.. */
 	ldq $16, SP_OFF+24($sp)
@@ -431,9 +597,9 @@ ret_from_straced:
 $strace_success:
 	stq $0, 0($sp) /* save return value */
-	bsr $1, do_switch_stack
+	DO_SWITCH_STACK
 	jsr $26, syscall_trace_leave
-	bsr $1, undo_switch_stack
+	UNDO_SWITCH_STACK
 	br $31, ret_from_sys_call
 	.align 3
@@ -447,26 +613,31 @@ $strace_error:
 	stq $0, 0($sp)
 	stq $1, 72($sp) /* a3 for return */
-	bsr $1, do_switch_stack
+	DO_SWITCH_STACK
 	mov $18, $9 /* save old syscall number */
 	mov $19, $10 /* save old a3 */
 	jsr $26, syscall_trace_leave
 	mov $9, $18
 	mov $10, $19
-	bsr $1, undo_switch_stack
+	UNDO_SWITCH_STACK
 	mov $31, $26 /* tell "ret_from_sys_call" we can restart */
 	br ret_from_sys_call
-.end strace
+CFI_END_OSF_FRAME entSys
 /*
  * Save and restore the switch stack -- aka the balance of the user context.
  */
 	.align 4
-	.ent do_switch_stack
+	.type do_switch_stack, @function
+	.cfi_startproc simple
+	.cfi_return_column 64
+	.cfi_def_cfa $sp, 0
+	.cfi_register 64, $1
 do_switch_stack:
 	lda $sp, -SWITCH_STACK_SIZE($sp)
+	.cfi_adjust_cfa_offset SWITCH_STACK_SIZE
 	stq $9, 0($sp)
 	stq $10, 8($sp)
 	stq $11, 16($sp)
@@ -510,10 +681,14 @@ do_switch_stack:
 	stt $f0, 312($sp) # save fpcr in slot of $f31
 	ldt $f0, 64($sp) # dont let "do_switch_stack" change fp state.
 	ret $31, ($1), 1
-.end do_switch_stack
+	.cfi_endproc
+	.size do_switch_stack, .-do_switch_stack
 	.align 4
-	.ent undo_switch_stack
+	.type undo_switch_stack, @function
+	.cfi_startproc simple
+	.cfi_def_cfa $sp, 0
+	.cfi_register 64, $1
 undo_switch_stack:
 	ldq $9, 0($sp)
 	ldq $10, 8($sp)
@@ -558,7 +733,8 @@ undo_switch_stack:
 	ldt $f30, 304($sp)
 	lda $sp, SWITCH_STACK_SIZE($sp)
 	ret $31, ($1), 1
-.end undo_switch_stack
+	.cfi_endproc
+	.size undo_switch_stack, .-undo_switch_stack
 /*
  * The meat of the context switch code.
@@ -566,17 +742,18 @@ undo_switch_stack:
 	.align 4
 	.globl alpha_switch_to
-	.ent alpha_switch_to
+	.type alpha_switch_to, @function
+	.cfi_startproc
 alpha_switch_to:
-	.prologue 0
-	bsr $1, do_switch_stack
+	DO_SWITCH_STACK
 	call_pal PAL_swpctx
 	lda $8, 0x3fff
-	bsr $1, undo_switch_stack
+	UNDO_SWITCH_STACK
 	bic $sp, $8, $8
 	mov $17, $0
 	ret
-.end alpha_switch_to
+	.cfi_endproc
+	.size alpha_switch_to, .-alpha_switch_to
 
 /*
  * New processes begin life here.
...
@@ -236,7 +236,7 @@ void __init
 init_rtc_irq(void)
 {
 	irq_set_chip_and_handler_name(RTC_IRQ, &dummy_irq_chip,
-				      handle_simple_irq, "RTC");
+				      handle_percpu_irq, "RTC");
 	setup_irq(RTC_IRQ, &timer_irqaction);
 }
...
@@ -264,9 +264,10 @@ recv_secondary_console_msg(void)
 		if (cnt <= 0 || cnt >= 80)
 			strcpy(buf, "<<< BOGUS MSG >>>");
 		else {
-			cp1 = (char *) &cpu->ipc_buffer[11];
+			cp1 = (char *) &cpu->ipc_buffer[1];
 			cp2 = buf;
-			strcpy(cp2, cp1);
+			memcpy(cp2, cp1, cnt);
+			cp2[cnt] = '\0';
 			while ((cp2 = strchr(cp2, '\r')) != 0) {
 				*cp2 = ' ';
...
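This hunk fixes two problems at once, matching the "'1' instead of '11'" and "memcpy() instead of strcpy()" items in the merge log: the message really starts at ipc_buffer[1], and the received text is length-delimited by cnt rather than NUL-terminated, so strcpy() could read past the buffer. The standalone sketch below (made-up buffer and length; only the copy pattern is the point) shows why memcpy() plus an explicit terminator is the safe idiom.

```c
/* Why memcpy() + explicit terminator beats strcpy() here: the source
 * buffer is length-delimited (cnt bytes), not NUL-terminated. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char ipc_buffer[8] = { 'h', 'e', 'l', 'l', 'o', '!', '!', '!' }; /* no '\0' */
	long cnt = 5;                  /* length reported out of band */
	char buf[80];

	/* strcpy(buf, ipc_buffer) would keep reading past the 8 bytes
	   looking for a terminator; this copies only what was received. */
	memcpy(buf, ipc_buffer, cnt);
	buf[cnt] = '\0';

	printf("%s\n", buf);           /* prints "hello" */
	return 0;
}
```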
@@ -190,9 +190,6 @@ static struct irq_chip clipper_irq_type = {
 static void
 dp264_device_interrupt(unsigned long vector)
 {
-#if 1
-	printk("dp264_device_interrupt: NOT IMPLEMENTED YET!!\n");
-#else
 	unsigned long pld;
 	unsigned int i;
@@ -210,12 +207,7 @@ dp264_device_interrupt(unsigned long vector)
 			isa_device_interrupt(vector);
 		else
 			handle_irq(16 + i);
-#if 0
-		TSUNAMI_cchip->dir0.csr = 1UL << i; mb();
-		tmp = TSUNAMI_cchip->dir0.csr;
-#endif
 	}
-#endif
 }
 
 static void
...
@@ -317,8 +317,9 @@ marvel_init_irq(void)
 }
 
 static int
-marvel_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
 {
+	struct pci_dev *dev = (struct pci_dev *)cdev;
 	struct pci_controller *hose = dev->sysdata;
 	struct io7_port *io7_port = hose->sysdata;
 	struct io7 *io7 = io7_port->io7;
...
@@ -524,6 +524,8 @@ sys_call_table:
 	.quad sys_sendmmsg
 	.quad sys_process_vm_readv
 	.quad sys_process_vm_writev		/* 505 */
+	.quad sys_kcmp
+	.quad sys_finit_module
 
 	.size sys_call_table, . - sys_call_table
 	.type sys_call_table, @object
...
@@ -105,9 +105,7 @@ void arch_irq_work_raise(void)
 static inline __u32 rpcc(void)
 {
-	__u32 result;
-	asm volatile ("rpcc %0" : "=r"(result));
-	return result;
+	return __builtin_alpha_rpcc();
 }
 
 int update_persistent_clock(struct timespec now)
...
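__builtin_alpha_rpcc() is GCC's intrinsic for the rpcc instruction, so the hand-written asm can go away. Only the low 32 bits behave as a free-running cycle counter, which is why rpcc() keeps returning __u32 and callers work with unsigned 32-bit differences. A hedged sketch of that usage pattern (illustrative helper names; it compiles only with an Alpha-targeting GCC):

```c
/* Cycle-delta measurement with the rpcc builtin.  Unsigned 32-bit
 * subtraction handles counter wraparound automatically. */
typedef unsigned int u32;

static inline u32 rpcc(void)
{
	return __builtin_alpha_rpcc();   /* low 32 bits of the cycle counter */
}

unsigned int cycles_for(void (*fn)(void))
{
	u32 start = rpcc();
	fn();
	return rpcc() - start;           /* wraps correctly in unsigned math */
}
```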
@@ -66,8 +66,8 @@ dik_show_regs(struct pt_regs *regs, unsigned long *r9_15)
 {
 	printk("pc = [<%016lx>] ra = [<%016lx>] ps = %04lx %s\n",
 	       regs->pc, regs->r26, regs->ps, print_tainted());
-	print_symbol("pc is at %s\n", regs->pc);
-	print_symbol("ra is at %s\n", regs->r26);
+	printk("pc is at %pSR\n", (void *)regs->pc);
+	printk("ra is at %pSR\n", (void *)regs->r26);
 	printk("v0 = %016lx t0 = %016lx t1 = %016lx\n",
 	       regs->r0, regs->r1, regs->r2);
 	printk("t2 = %016lx t3 = %016lx t4 = %016lx\n",
@@ -132,9 +132,7 @@ dik_show_trace(unsigned long *sp)
 			continue;
 		if (tmp >= (unsigned long) &_etext)
 			continue;
-		printk("[<%lx>]", tmp);
-		print_symbol(" %s", tmp);
-		printk("\n");
+		printk("[<%lx>] %pSR\n", tmp, (void *)tmp);
 		if (i > 40) {
 			printk(" ...");
 			break;
...
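%pSR is one of printk()'s symbolic pointer formats: it resolves an address to symbol+offset, treating the value as a return address, which is what lets one printk() replace the old printk()+print_symbol() pairs above. A minimal, hypothetical in-kernel usage sketch (not part of the patch):

```c
/* Not from the patch: a minimal example of the %pSR format the
 * traps.c hunk switches to. */
#include <linux/kernel.h>

void report_caller(void)
{
	/* prints something like "called from foo_func+0x1c/0x80" */
	printk(KERN_INFO "called from %pSR\n",
	       __builtin_return_address(0));
}
```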
@@ -151,15 +151,12 @@ do { \
 #endif /* __a29k__ */
 
 #if defined(__alpha) && W_TYPE_SIZE == 64
 #define umul_ppmm(ph, pl, m0, m1) \
 do { \
 	UDItype __m0 = (m0), __m1 = (m1); \
-	__asm__ ("umulh %r1,%2,%0" \
-	: "=r" ((UDItype) ph) \
-	: "%rJ" (__m0), \
-	  "rI" (__m1)); \
-	(pl) = __m0 * __m1; \
-} while (0)
+	(ph) = __builtin_alpha_umulh(__m0, __m1); \
+	(pl) = __m0 * __m1; \
+} while (0)
 #define UMUL_TIME 46
 #ifndef LONGLONG_STANDALONE
 #define udiv_qrnnd(q, r, n1, n0, d) \
@@ -167,7 +164,7 @@ do { UDItype __r; \
 	(q) = __udiv_qrnnd(&__r, (n1), (n0), (d)); \
 	(r) = __r; \
 } while (0)
-extern UDItype __udiv_qrnnd();
+extern UDItype __udiv_qrnnd(UDItype *, UDItype, UDItype, UDItype);
 #define UDIV_TIME 220
 #endif /* LONGLONG_STANDALONE */
 #endif /* __alpha */
...
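umul_ppmm() promises that ph:pl is the full 128-bit product of two 64-bit operands; the patch only swaps the inline umulh asm for GCC's __builtin_alpha_umulh while keeping that contract. The portable sketch below checks the same contract using unsigned __int128 instead of the Alpha builtin, so it runs on any 64-bit GCC/Clang host (the macro and variable names are made up).

```c
/* Check the umul_ppmm() contract: ph:pl == m0 * m1 as a 128-bit value.
 * Uses unsigned __int128 as a portable stand-in for umulh. */
#include <stdio.h>

typedef unsigned long long UDItype;

#define umul_ppmm_sketch(ph, pl, m0, m1)                              \
do {                                                                  \
	unsigned __int128 __p = (unsigned __int128)(m0) * (m1);       \
	(ph) = (UDItype)(__p >> 64);   /* what umulh computes */      \
	(pl) = (UDItype)__p;           /* low half: plain multiply */ \
} while (0)

int main(void)
{
	UDItype hi, lo;
	umul_ppmm_sketch(hi, lo, 0xfffffffffffffff1ULL, 0x10ULL);
	printf("high = %#llx, low = %#llx\n", hi, lo);
	/* expected: high = 0xf, low = 0xffffffffffffff10 */
	return 0;
}
```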