Commit 7d853ae3 authored by Martin Schwidefsky, committed by Linus Torvalds

[PATCH] s390: arch fixes.

 - Fix cflags for z990 compiles.
 - Rename resume to __switch_to to avoid name clash.
 - Fix show_trace and show_stack.
 - Add alignments to linker script.
 - Add atomic64_t and related functions.
 - Add include/asm-s390/local.h
 - Fix 31 bit get_user for 8 byte values.
 - Fix show_regs oops.
 - Add a couple of might_sleep() calls.
 - Fix loading of modules with a BIG symbol table.
 - Fix inline asm constraint in __get_user_asm_1.
 - Fix nested irq_enter bug on shutdown.
 - Add sched_clock function.
parent 41b986b6
@@ -30,7 +30,7 @@ endif
 cflags-$(CONFIG_MARCH_G5) += $(call check_gcc,-march=g5,)
 cflags-$(CONFIG_MARCH_Z900) += $(call check_gcc,-march=z900,)
-cflags-$(CONFIG_MARCH_Z990) += $(call check_gcc,-march=trex,)
+cflags-$(CONFIG_MARCH_Z990) += $(call check_gcc,-march=z990,)
 CFLAGS += $(cflags-y)
 CFLAGS += $(call check_gcc,-finline-limit=10000,)
...
@@ -7,7 +7,6 @@ COMPILE_VERSION := __linux_compile_version_id__`hostname | \
 tr -c '[0-9A-Za-z]' '_'`_t
 EXTRA_CFLAGS := -DCOMPILE_VERSION=$(COMPILE_VERSION) -gstabs -I .
-EXTRA_AFLAGS := -traditional
 targets := image
...
@@ -123,18 +123,18 @@ entry_base:
  * Returns:
  * gpr2 = prev
  */
-.globl resume
-resume:
+.globl __switch_to
+__switch_to:
 basr %r1,0
-resume_base:
+__switch_to_base:
 tm __THREAD_per(%r3),0xe8 # new process is using per ?
-bz resume_noper-resume_base(%r1) # if not we're fine
+bz __switch_to_noper-__switch_to_base(%r1) # if not we're fine
 stctl %c9,%c11,24(%r15) # We are using per stuff
 clc __THREAD_per(12,%r3),24(%r15)
-be resume_noper-resume_base(%r1) # we got away w/o bashing TLB's
+be __switch_to_noper-__switch_to_base(%r1) # we got away w/o bashing TLB's
 lctl %c9,%c11,__THREAD_per(%r3) # Nope we didn't
-resume_noper:
-stm %r6,%r15,24(%r15) # store resume registers of prev task
+__switch_to_noper:
+stm %r6,%r15,24(%r15) # store __switch_to registers of prev task
 st %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
 l %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
 stam %a2,%a2,__THREAD_ar2(%r2) # store kernel access reg. 2
@@ -199,6 +199,8 @@ sysc_do_restart:
 sysc_return:
 stnsm 24(%r15),0xfc # disable I/O and ext. interrupts
+tm SP_PSW+1(%r15),0x01 # returning to user ?
+bno BASED(sysc_leave)
 tm __TI_flags+3(%r9),_TIF_WORK_SVC
 bnz BASED(sysc_work) # there is work to do (signals etc.)
 sysc_leave:
...
@@ -111,16 +111,16 @@ _TIF_WORK_INT = (_TIF_SIGPENDING | _TIF_NEED_RESCHED)
  * Returns:
  * gpr2 = prev
  */
-.globl resume
-resume:
+.globl __switch_to
+__switch_to:
 tm __THREAD_per+4(%r3),0xe8 # is the new process using per ?
-jz resume_noper # if not we're fine
+jz __switch_to_noper # if not we're fine
 stctg %c9,%c11,48(%r15) # We are using per stuff
 clc __THREAD_per(24,%r3),48(%r15)
-je resume_noper # we got away without bashing TLB's
+je __switch_to_noper # we got away without bashing TLB's
 lctlg %c9,%c11,__THREAD_per(%r3) # Nope we didn't
-resume_noper:
-stmg %r6,%r15,48(%r15) # store resume registers of prev task
+__switch_to_noper:
+stmg %r6,%r15,48(%r15) # store __switch_to registers of prev task
 stg %r15,__THREAD_ksp(%r2) # store kernel stack to prev->tss.ksp
 lg %r15,__THREAD_ksp(%r3) # load kernel stack from next->tss.ksp
 stam %a2,%a2,__THREAD_ar2(%r2) # store kernel access reg. 2
@@ -187,6 +187,8 @@ sysc_noemu:
 sysc_return:
 stnsm 48(%r15),0xfc # disable I/O and ext. interrupts
+tm SP_PSW+1(%r15),0x01 # returning to user ?
+jno sysc_leave
 tm __TI_flags+7(%r9),_TIF_WORK_SVC
 jnz sysc_work # there is work to do (signals etc.)
 sysc_leave:
...
@@ -133,9 +133,8 @@ module_frob_arch_sections(Elf_Ehdr *hdr, Elf_Shdr *sechdrs,
 /* Allocate one syminfo structure per symbol. */
 me->arch.nsyms = symtab->sh_size / sizeof(Elf_Sym);
-me->arch.syminfo = kmalloc(me->arch.nsyms *
-sizeof(struct mod_arch_syminfo),
-GFP_KERNEL);
+me->arch.syminfo = vmalloc(me->arch.nsyms *
+sizeof(struct mod_arch_syminfo));
 if (!me->arch.syminfo)
 return -ENOMEM;
 symbols = (void *) hdr + symtab->sh_offset;
@@ -397,7 +396,7 @@ int module_finalize(const Elf_Ehdr *hdr,
 struct module *me)
 {
 if (me->arch.syminfo)
-kfree(me->arch.syminfo);
+vfree(me->arch.syminfo);
 return 0;
 }
...
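Aside on the kmalloc() -> vmalloc() switch above (an explanatory note, not part of the patch): the syminfo array grows linearly with the number of module symbols, and for a module with a very large symbol table the request can exceed what a single kmalloc() will hand out as physically contiguous memory, whereas vmalloc() only needs virtually contiguous pages. A rough, self-contained sizing sketch; the struct layout, symbol count, and allocation ceiling below are assumptions for illustration only:

/* Illustrative only: the struct layout, symbol count and cap are made up. */
#include <stdio.h>

struct fake_syminfo {              /* stand-in for struct mod_arch_syminfo */
	unsigned long got_offset;
	unsigned long plt_offset;
	int got_initialized;
	int plt_initialized;
};

int main(void)
{
	unsigned long nsyms = 50000;   /* a module with a huge symbol table */
	unsigned long bytes = nsyms * sizeof(struct fake_syminfo);
	unsigned long kmalloc_cap = 128 * 1024;  /* assumed single-allocation ceiling */

	printf("syminfo array: %lu bytes (%lu KiB) -- %s\n", bytes, bytes / 1024,
	       bytes > kmalloc_cap ? "too large for one kmalloc(), vmalloc() needed"
	                           : "would still fit in one kmalloc()");
	return 0;
}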
@@ -118,9 +118,6 @@ int cpu_idle(void)
 return 0;
 }
-extern void show_registers(struct pt_regs *regs);
-extern void show_trace(unsigned long *sp);
 void show_regs(struct pt_regs *regs)
 {
 struct task_struct *tsk = current;
@@ -133,7 +130,7 @@ void show_regs(struct pt_regs *regs)
 show_registers(regs);
 /* Show stack backtrace if pt_regs is from kernel mode */
 if (!(regs->psw.mask & PSW_MASK_PSTATE))
-show_trace((unsigned long *) regs->gprs[15]);
+show_trace(0,(unsigned long *) regs->gprs[15]);
 }
 extern void kernel_thread_starter(void);
...
@@ -231,6 +231,18 @@ void machine_restart_smp(char * __unused)
 on_each_cpu(do_machine_restart, NULL, 0, 0);
 }
+static void do_wait_for_stop(void)
+{
+unsigned long cr[16];
+__ctl_store(cr, 0, 15);
+cr[0] &= ~0xffff;
+cr[6] = 0;
+__ctl_load(cr, 0, 15);
+for (;;)
+enabled_wait();
+}
 static void do_machine_halt(void * __unused)
 {
 if (smp_processor_id() == 0) {
@@ -240,8 +252,7 @@ static void do_machine_halt(void * __unused)
 signal_processor(smp_processor_id(),
 sigp_stop_and_store_status);
 }
-for (;;)
-enabled_wait();
+do_wait_for_stop();
 }
 void machine_halt_smp(void)
@@ -258,8 +269,7 @@ static void do_machine_power_off(void * __unused)
 signal_processor(smp_processor_id(),
 sigp_stop_and_store_status);
 }
-for (;;)
-enabled_wait();
+do_wait_for_stop();
 }
 void machine_power_off_smp(void)
@@ -577,6 +587,7 @@ int setup_profiling_timer(unsigned int multiplier)
 return 0;
 }
+EXPORT_SYMBOL(cpu_possible_map);
 EXPORT_SYMBOL(lowcore_ptr);
 EXPORT_SYMBOL(smp_ctl_set_bit);
 EXPORT_SYMBOL(smp_ctl_clear_bit);
...
@@ -55,6 +55,14 @@ static u64 xtime_cc;
 extern unsigned long wall_jiffies;
+/*
+ * Scheduler clock - returns current time in nanosec units.
+ */
+unsigned long long sched_clock(void)
+{
+return (get_clock() - jiffies_timer_cc) >> 2;
+}
 void tod_to_timeval(__u64 todval, struct timespec *xtime)
 {
 unsigned long long sec;
@@ -70,8 +78,7 @@ static inline unsigned long do_gettimeoffset(void)
 {
 __u64 now;
-asm volatile ("STCK 0(%0)" : : "a" (&now) : "memory", "cc");
-now = (now - jiffies_timer_cc) >> 12;
+now = (get_clock() - jiffies_timer_cc) >> 12;
 /* We require the offset from the latest update of xtime */
 now -= (__u64) wall_jiffies*USECS_PER_JIFFY;
 return (unsigned long) now;
@@ -165,8 +172,7 @@ static void do_comparator_interrupt(struct pt_regs *regs, __u16 error_code)
 __u32 ticks;
 /* Calculate how many ticks have passed. */
-asm volatile ("STCK 0(%0)" : : "a" (&tmp) : "memory", "cc");
-tmp = tmp - S390_lowcore.jiffy_timer;
+tmp = get_clock() - S390_lowcore.jiffy_timer;
 if (tmp >= 2*CLK_TICKS_PER_JIFFY) { /* more than one tick ? */
 ticks = __calculate_ticks(tmp);
 S390_lowcore.jiffy_timer +=
...
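A note on the shift counts in the time code above (an explanatory aside, not part of the patch): the s390 TOD clock increments bit 51 once per microsecond, so one TOD unit is 1/4096 microsecond, roughly 0.244 ns. Shifting a TOD difference right by 12 therefore yields microseconds, which is what do_gettimeoffset() needs, while shifting right by 2 yields units of 1/1024 microsecond, close enough to a nanosecond for sched_clock(). A quick arithmetic check:

/* Illustrative only: checks the unit conversions implied by the shifts above. */
#include <stdio.h>

int main(void)
{
	double tod_unit_ns = 1000.0 / 4096.0;              /* one TOD unit, ~0.244 ns */
	double unit_after_shift12 = tod_unit_ns * 4096.0;  /* >> 12 -> 1000 ns = 1 us */
	double unit_after_shift2  = tod_unit_ns * 4.0;     /* >> 2  -> ~0.977 ns */

	printf("TOD unit:    %.3f ns\n", tod_unit_ns);
	printf("after >> 12: %.1f ns (microsecond steps)\n", unit_after_shift12);
	printf("after >> 2:  %.3f ns (treated as nanoseconds)\n", unit_after_shift2);
	return 0;
}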
@@ -83,7 +83,7 @@ void show_trace(struct task_struct *task, unsigned long * stack)
 unsigned long backchain, low_addr, high_addr, ret_addr;
 if (!stack)
-stack = *stack_pointer;
+stack = (task == NULL) ? *stack_pointer : &(task->thread.ksp);
 printk("Call Trace:\n");
 low_addr = ((unsigned long) stack) & PSW_ADDR_INSN;
@@ -120,8 +120,12 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 // debugging aid: "show_stack(NULL);" prints the
 // back trace for this cpu.
-if(sp == NULL)
+if (!sp) {
+if (task)
+sp = (unsigned long *) task->thread.ksp;
+else
 sp = *stack_pointer;
+}
 stack = sp;
 for (i = 0; i < kstack_depth_to_print; i++) {
@@ -140,7 +144,7 @@ void show_stack(struct task_struct *task, unsigned long *sp)
 */
 void dump_stack(void)
 {
-show_stack(current, 0);
+show_stack(0, 0);
 }
 void show_registers(struct pt_regs *regs)
...
@@ -98,6 +98,7 @@ SECTIONS
 . = ALIGN(256);
 __initramfs_start = .;
 .init.ramfs : { *(.init.initramfs) }
+. = ALIGN(2);
 __initramfs_end = .;
 . = ALIGN(256);
 __per_cpu_start = .;
@@ -109,6 +110,7 @@ SECTIONS
 __bss_start = .; /* BSS */
 .bss : { *(.bss) }
+. = ALIGN(2);
 __bss_stop = .;
 _end = . ;
...
 #ifndef __ARCH_S390_ATOMIC__
 #define __ARCH_S390_ATOMIC__
+#ifdef __KERNEL__
 /*
  * include/asm-s390/atomic.h
  *
  * S390 version
- * Copyright (C) 1999,2000 IBM Deutschland Entwicklung GmbH, IBM Corporation
+ * Copyright (C) 1999-2003 IBM Deutschland Entwicklung GmbH, IBM Corporation
  * Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
- * Denis Joseph Barrow
+ * Denis Joseph Barrow,
+ * Arnd Bergmann (arndb@de.ibm.com)
  *
  * Derived from "include/asm-i386/bitops.h"
  * Copyright (C) 1992, Linus Torvalds
@@ -20,12 +22,13 @@
  * S390 uses 'Compare And Swap' for atomicity in SMP enviroment
  */
-typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
+typedef struct {
+volatile int counter;
+} __attribute__ ((aligned (4))) atomic_t;
 #define ATOMIC_INIT(i) { (i) }
-#define atomic_eieio() __asm__ __volatile__ ("BCR 15,0")
-#define __CS_LOOP(old_val, new_val, ptr, op_val, op_string) \
+#define __CS_LOOP(ptr, op_val, op_string) ({ \
+typeof(ptr->counter) old_val, new_val; \
 __asm__ __volatile__(" l %0,0(%3)\n" \
 "0: lr %1,%0\n" \
 op_string " %1,%4\n" \
@@ -33,92 +36,140 @@ typedef struct { volatile int counter; } __attribute__ ((aligned (4))) atomic_t;
 " jl 0b" \
 : "=&d" (old_val), "=&d" (new_val), \
 "+m" (((atomic_t *)(ptr))->counter) \
-: "a" (ptr), "d" (op_val) : "cc" );
+: "a" (ptr), "d" (op_val) : "cc" ); \
+new_val; \
+})
 #define atomic_read(v) ((v)->counter)
 #define atomic_set(v,i) (((v)->counter) = (i))
-static __inline__ void atomic_add(int i, atomic_t *v)
+static __inline__ void atomic_add(int i, atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, i, "ar");
+__CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_return (int i, atomic_t *v)
+static __inline__ int atomic_add_return(int i, atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, i, "ar");
-return new_val;
+return __CS_LOOP(v, i, "ar");
 }
-static __inline__ int atomic_add_negative(int i, atomic_t *v)
+static __inline__ int atomic_add_negative(int i, atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, i, "ar");
-return new_val < 0;
+return __CS_LOOP(v, i, "ar") < 0;
 }
-static __inline__ void atomic_sub(int i, atomic_t *v)
+static __inline__ void atomic_sub(int i, atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, i, "sr");
+__CS_LOOP(v, i, "sr");
 }
-static __inline__ void atomic_inc(volatile atomic_t *v)
+static __inline__ void atomic_inc(volatile atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, 1, "ar");
+__CS_LOOP(v, 1, "ar");
 }
-static __inline__ int atomic_inc_return(volatile atomic_t *v)
+static __inline__ int atomic_inc_return(volatile atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, 1, "ar");
-return new_val;
+return __CS_LOOP(v, 1, "ar");
 }
-static __inline__ int atomic_inc_and_test(volatile atomic_t *v)
+static __inline__ int atomic_inc_and_test(volatile atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, 1, "ar");
-return new_val != 0;
+return __CS_LOOP(v, 1, "ar") != 0;
 }
-static __inline__ void atomic_dec(volatile atomic_t *v)
+static __inline__ void atomic_dec(volatile atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, 1, "sr");
+__CS_LOOP(v, 1, "sr");
 }
-static __inline__ int atomic_dec_return(volatile atomic_t *v)
+static __inline__ int atomic_dec_return(volatile atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, 1, "sr");
-return new_val;
+return __CS_LOOP(v, 1, "sr");
 }
-static __inline__ int atomic_dec_and_test(volatile atomic_t *v)
+static __inline__ int atomic_dec_and_test(volatile atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, 1, "sr");
-return new_val == 0;
+return __CS_LOOP(v, 1, "sr") == 0;
 }
-static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t *v)
+static __inline__ void atomic_clear_mask(unsigned long mask, atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, ~mask, "nr");
+__CS_LOOP(v, ~mask, "nr");
 }
-static __inline__ void atomic_set_mask(unsigned long mask, atomic_t *v)
+static __inline__ void atomic_set_mask(unsigned long mask, atomic_t * v)
 {
-int old_val, new_val;
-__CS_LOOP(old_val, new_val, v, mask, "or");
+__CS_LOOP(v, mask, "or");
 }
+#undef __CS_LOOP
+#ifdef __s390x__
+typedef struct {
+volatile long long counter;
+} __attribute__ ((aligned (8))) atomic64_t;
+#define ATOMIC64_INIT(i) { (i) }
+#define __CSG_LOOP(ptr, op_val, op_string) ({ \
+typeof(ptr->counter) old_val, new_val; \
+__asm__ __volatile__(" lg %0,0(%3)\n" \
+"0: lgr %1,%0\n" \
+op_string " %1,%4\n" \
+" csg %0,%1,0(%3)\n" \
+" jl 0b" \
+: "=&d" (old_val), "=&d" (new_val), \
+"+m" (((atomic_t *)(ptr))->counter) \
+: "a" (ptr), "d" (op_val) : "cc" ); \
+new_val; \
+})
+#define atomic64_read(v) ((v)->counter)
+#define atomic64_set(v,i) (((v)->counter) = (i))
+static __inline__ void atomic64_add(int i, atomic64_t * v)
+{
+__CSG_LOOP(v, i, "agr");
+}
+static __inline__ long long atomic64_add_return(int i, atomic64_t * v)
+{
+return __CSG_LOOP(v, i, "agr");
+}
+static __inline__ long long atomic64_add_negative(int i, atomic64_t * v)
+{
+return __CSG_LOOP(v, i, "agr") < 0;
+}
+static __inline__ void atomic64_sub(int i, atomic64_t * v)
+{
+__CSG_LOOP(v, i, "sgr");
+}
+static __inline__ void atomic64_inc(volatile atomic64_t * v)
+{
+__CSG_LOOP(v, 1, "agr");
+}
+static __inline__ long long atomic64_inc_return(volatile atomic64_t * v)
+{
+return __CSG_LOOP(v, 1, "agr");
+}
+static __inline__ long long atomic64_inc_and_test(volatile atomic64_t * v)
+{
+return __CSG_LOOP(v, 1, "agr") != 0;
+}
+static __inline__ void atomic64_dec(volatile atomic64_t * v)
+{
+__CSG_LOOP(v, 1, "sgr");
+}
+static __inline__ long long atomic64_dec_return(volatile atomic64_t * v)
+{
+return __CSG_LOOP(v, 1, "sgr");
+}
+static __inline__ long long atomic64_dec_and_test(volatile atomic64_t * v)
+{
+return __CSG_LOOP(v, 1, "sgr") == 0;
+}
+static __inline__ void atomic64_clear_mask(unsigned long mask, atomic64_t * v)
+{
+__CSG_LOOP(v, ~mask, "ngr");
+}
+static __inline__ void atomic64_set_mask(unsigned long mask, atomic64_t * v)
+{
+__CSG_LOOP(v, mask, "ogr");
+}
+#undef __CSG_LOOP
+#endif
 /*
 returns 0 if expected_oldval==value in *v ( swap was successful )
 returns 1 if unsuccessful.
+This is non-portable, use bitops or spinlocks instead!
 */
 static __inline__ int
 atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
@@ -137,33 +188,10 @@ atomic_compare_and_swap(int expected_oldval,int new_val,atomic_t *v)
 return retval;
 }
-/*
-Spin till *v = expected_oldval then swap with newval.
-*/
-static __inline__ void
-atomic_compare_and_swap_spin(int expected_oldval,int new_val,atomic_t *v)
-{
-unsigned long tmp;
-__asm__ __volatile__(
-"0: lr %1,%3\n"
-" cs %1,%4,0(%2)\n"
-" jl 0b\n"
-: "+m" (v->counter), "=&d" (tmp)
-: "a" (v), "d" (expected_oldval) , "d" (new_val)
-: "cc" );
-}
-#define atomic_compare_and_swap_debug(where,from,to) \
-if (atomic_compare_and_swap ((from), (to), (where))) {\
-printk (KERN_WARNING"%s/%d atomic counter:%s couldn't be changed from %d(%s) to %d(%s), was %d\n",\
-__FILE__,__LINE__,#where,(from),#from,(to),#to,atomic_read (where));\
-atomic_set(where,(to));\
-}
 #define smp_mb__before_atomic_dec() smp_mb()
 #define smp_mb__after_atomic_dec() smp_mb()
 #define smp_mb__before_atomic_inc() smp_mb()
 #define smp_mb__after_atomic_inc() smp_mb()
-#endif /* __ARCH_S390_ATOMIC __ */
+#endif /* __KERNEL__ */
+#endif /* __ARCH_S390_ATOMIC__ */
#ifndef _ASM_LOCAL_H
#define _ASM_LOCAL_H
#include <linux/config.h>
#include <linux/percpu.h>
#include <asm/atomic.h>
#ifndef __s390x__
typedef atomic_t local_t;
#define LOCAL_INIT(i) ATOMIC_INIT(i)
#define local_read(v) atomic_read(v)
#define local_set(v,i) atomic_set(v,i)
#define local_inc(v) atomic_inc(v)
#define local_dec(v) atomic_dec(v)
#define local_add(i, v) atomic_add(i, v)
#define local_sub(i, v) atomic_sub(i, v)
#else
typedef atomic64_t local_t;
#define LOCAL_INIT(i) ATOMIC64_INIT(i)
#define local_read(v) atomic64_read(v)
#define local_set(v,i) atomic64_set(v,i)
#define local_inc(v) atomic64_inc(v)
#define local_dec(v) atomic64_dec(v)
#define local_add(i, v) atomic64_add(i, v)
#define local_sub(i, v) atomic64_sub(i, v)
#endif
#define __local_inc(v) ((v)->counter++)
#define __local_dec(v) ((v)->counter--)
#define __local_add(i,v) ((v)->counter+=(i))
#define __local_sub(i,v) ((v)->counter-=(i))
/*
* Use these for per-cpu local_t variables: on some archs they are
* much more efficient than these naive implementations. Note they take
* a variable, not an address.
*/
#define cpu_local_read(v) local_read(&__get_cpu_var(v))
#define cpu_local_set(v, i) local_set(&__get_cpu_var(v), (i))
#define cpu_local_inc(v) local_inc(&__get_cpu_var(v))
#define cpu_local_dec(v) local_dec(&__get_cpu_var(v))
#define cpu_local_add(i, v) local_add((i), &__get_cpu_var(v))
#define cpu_local_sub(i, v) local_sub((i), &__get_cpu_var(v))
#define __cpu_local_inc(v) __local_inc(&__get_cpu_var(v))
#define __cpu_local_dec(v) __local_dec(&__get_cpu_var(v))
#define __cpu_local_add(i, v) __local_add((i), &__get_cpu_var(v))
#define __cpu_local_sub(i, v) __local_sub((i), &__get_cpu_var(v))
#endif /* _ASM_LOCAL_H */
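A minimal usage sketch for the new atomic64_t and local_t primitives (illustrative only, not part of the patch; the counters and the function are hypothetical, and the atomic64_t part assumes a 64-bit kernel since the type is only defined under __s390x__):

/* Hypothetical example. */
#include <linux/percpu.h>
#include <asm/atomic.h>
#include <asm/local.h>

static atomic64_t bytes_accounted = ATOMIC64_INIT(0);    /* global 64-bit counter */
static DEFINE_PER_CPU(local_t, events) = LOCAL_INIT(0);  /* cheap per-cpu counter */

static void account(unsigned int len)
{
	atomic64_add(len, &bytes_accounted);  /* compare-and-swap (CSG) loop */
	cpu_local_inc(events);                /* atomic_t/atomic64_t underneath */
}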
@@ -4,7 +4,7 @@
 /* S/390 systems don't have a PCI bus. This file is just here because some stupid .c code
  * includes it even if CONFIG_PCI is not set.
  */
-#define PCI_DMA_BUS_IS_PHYS (1)
+#define PCI_DMA_BUS_IS_PHYS (0)
 #endif /* __ASM_S390_PCI_H */
@@ -162,6 +162,9 @@ extern unsigned long thread_saved_pc(struct task_struct *t);
 */
 extern char *task_show_regs(struct task_struct *task, char *buffer);
+extern void show_registers(struct pt_regs *regs);
+extern void show_trace(struct task_struct *task, unsigned long *sp);
 unsigned long get_wchan(struct task_struct *p);
 #define __KSTK_PTREGS(tsk) ((struct pt_regs *) \
 (((unsigned long) tsk->thread_info + THREAD_SIZE - sizeof(struct pt_regs)) & -8L))
...
#ifndef _S390_SECTIONS_H
#define _S390_SECTIONS_H
#include <asm-generic/sections.h>
#endif
@@ -221,18 +221,18 @@ extern inline int _raw_write_trylock(rwlock_t *rw)
 __asm__ __volatile__(
 #ifndef __s390x__
-" lhi %0,1\n"
-" sll %0,31\n"
-" basr %1,0\n"
-"0: cs %0,%1,0(%3)\n"
+" slr %0,%0\n"
+" lhi %1,1\n"
+" sll %1,31\n"
+" cs %0,%1,0(%3)"
 #else /* __s390x__ */
-" llihh %0,0x8000\n"
-" basr %1,0\n"
+" slgr %0,%0\n"
+" llihh %1,0x8000\n"
 "0: csg %0,%1,0(%3)\n"
 #endif /* __s390x__ */
 : "=&d" (result), "=&d" (reg), "+m" (rw->lock)
 : "a" (&rw->lock) : "cc" );
-return !result;
+return result == 0;
 }
 #endif /* __ASM_SPINLOCK_H */
@@ -21,7 +21,7 @@
 struct task_struct;
-extern struct task_struct *resume(void *, void *);
+extern struct task_struct *__switch_to(void *, void *);
 #ifdef __s390x__
 #define __FLAG_SHIFT 56
@@ -88,7 +88,7 @@ static inline void restore_fp_regs(s390_fp_regs *fpregs)
 break; \
 save_fp_regs(&prev->thread.fp_regs); \
 restore_fp_regs(&next->thread.fp_regs); \
-prev = resume(prev,next); \
+prev = __switch_to(prev,next); \
 } while (0)
 #define nop() __asm__ __volatile__ ("nop")
...
@@ -216,7 +216,12 @@ struct exception_table_entry
 __pu_err; \
 })
-#define put_user(x, ptr) __put_user(x, ptr)
+#define put_user(x, ptr) \
+({ \
+might_sleep(); \
+__put_user(x, ptr); \
+})
 extern int __put_user_bad(void);
@@ -224,18 +229,18 @@ extern int __put_user_bad(void);
 #define __get_user_asm_8(x, ptr, err) \
 ({ \
-register __typeof__(*(ptr)) const * __from asm("2"); \
-register __typeof__(x) * __to asm("4"); \
+register __typeof__(*(ptr)) const * __from asm("4"); \
+register __typeof__(x) * __to asm("2"); \
 __from = (ptr); \
 __to = &(x); \
 __asm__ __volatile__ ( \
 " sacf 512\n" \
-"0: mvc 0(8,%1),0(%2)\n" \
+"0: mvc 0(8,%2),0(%4)\n" \
 " sacf 0\n" \
 "1:\n" \
 __uaccess_fixup \
 : "=&d" (err), "=m" (x) \
-: "a" (__to),"a" (__from),"K" (-EFAULT),"0" (0) \
+: "a" (__to),"K" (-EFAULT),"a" (__from),"0" (0) \
 : "cc" ); \
 })
@@ -300,7 +305,7 @@ extern int __put_user_bad(void);
 " sacf 0\n" \
 "1:\n" \
 __uaccess_fixup \
-: "=&d" (err), "=d" (x) \
+: "=&d" (err), "=&d" (x) \
 : "a" (__ptr), "K" (-EFAULT), "0" (0) \
 : "cc" ); \
 })
@@ -331,7 +336,11 @@ extern int __put_user_bad(void);
 __gu_err; \
 })
-#define get_user(x, ptr) __get_user(x, ptr)
+#define get_user(x, ptr) \
+({ \
+might_sleep(); \
+__get_user(x, ptr); \
+})
 extern int __get_user_bad(void);
@@ -351,6 +360,7 @@ extern long __copy_to_user_asm(const void *from, long n, const void *to);
 ({ \
 long err = 0; \
 __typeof__(n) __n = (n); \
+might_sleep(); \
 if (__access_ok(to,__n)) { \
 err = __copy_to_user_asm(from, __n, to); \
 } \
@@ -370,6 +380,7 @@ extern long __copy_from_user_asm(void *to, long n, const void *from);
 ({ \
 long err = 0; \
 __typeof__(n) __n = (n); \
+might_sleep(); \
 if (__access_ok(from,__n)) { \
 err = __copy_from_user_asm(to, __n, from); \
 } \
@@ -461,6 +472,7 @@ static inline long
 strncpy_from_user(char *dst, const char *src, long count)
 {
 long res = -EFAULT;
+might_sleep();
 if (access_ok(VERIFY_READ, src, 1))
 res = __strncpy_from_user(dst, src, count);
 return res;
@@ -477,6 +489,7 @@ strncpy_from_user(char *dst, const char *src, long count)
 static inline unsigned long
 strnlen_user(const char * src, unsigned long n)
 {
+might_sleep();
 __asm__ __volatile__ (
 " alr %0,%1\n"
 " slr 0,0\n"
@@ -510,6 +523,7 @@ strnlen_user(const char * src, unsigned long n)
 static inline unsigned long
 strnlen_user(const char * src, unsigned long n)
 {
+might_sleep();
 #if 0
 __asm__ __volatile__ (
 " algr %0,%1\n"
@@ -574,6 +588,7 @@ extern long __clear_user_asm(void *to, long n);
 static inline unsigned long
 clear_user(void *to, unsigned long n)
 {
+might_sleep();
 if (access_ok(VERIFY_WRITE, to, n))
 n = __clear_user(to, n);
 return n;
...
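The might_sleep() annotations added throughout uaccess.h mean that every user-copy primitive now warns, with sleep-inside-atomic debugging enabled, when it is called from a context that must not fault. A small hypothetical caller that exercises both the fixed 8-byte get_user() path and a copy helper; the structure and function below are invented for illustration and are not part of the patch:

/* Hypothetical driver fragment, for illustration only. */
#include <linux/errno.h>
#include <asm/uaccess.h>

struct demo_req {               /* made-up user-space argument block */
	unsigned long long cookie;  /* 8-byte field: hits __get_user_asm_8 on 31-bit */
	char name[32];
};

static int demo_fetch_request(void *uptr)
{
	struct demo_req req;
	unsigned long long cookie;

	/* Both calls may fault and sleep; the new might_sleep() calls catch
	 * misuse from atomic context at runtime. */
	if (copy_from_user(&req, uptr, sizeof(req)))
		return -EFAULT;
	if (get_user(cookie, &((struct demo_req *) uptr)->cookie))
		return -EFAULT;
	return 0;
}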