Commit 1afe0375 authored by Andrew Morton, committed by Linus Torvalds

[PATCH] s390: virtual timer interface.

From: Martin Schwidefsky <schwidefsky@de.ibm.com>

Add virtual timer interface.
parent 2e05bc63
@@ -250,6 +250,12 @@ config SHARED_KERNEL
You should only select this option if you know what you are
doing and want to exploit this feature.
config VIRT_TIMER
bool "Virtual CPU timer support"
help
This provides a kernel interface for virtual CPU timers.
Default is disabled.
endmenu
config PCMCIA
...
@@ -76,6 +76,7 @@ CONFIG_BINFMT_MISC=m
# CONFIG_PROCESS_DEBUG is not set
CONFIG_PFAULT=y
# CONFIG_SHARED_KERNEL is not set
# CONFIG_VIRT_TIMER is not set
# CONFIG_PCMCIA is not set
#
...
@@ -40,6 +40,9 @@
#include <asm/io.h>
#include <asm/processor.h>
#include <asm/irq.h>
#ifdef CONFIG_VIRT_TIMER
#include <asm/timer.h>
#endif
asmlinkage void ret_from_fork(void) __asm__("ret_from_fork");
@@ -77,6 +80,14 @@ void default_idle(void)
return;
}
#ifdef CONFIG_VIRT_TIMER
/*
* hook to stop timers that should not tick while CPU is idle
*/
if (stop_timers())
return;
#endif
/*
* Wait for external, I/O or machine check interrupt and
* switch off machine check bit after the wait has ended.
...
@@ -111,6 +111,7 @@ void do_extint(struct pt_regs *regs, unsigned short code)
int index;
irq_enter();
asm volatile ("mc 0,0");
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
account_ticks(regs);
kstat_cpu(smp_processor_id()).irqs[EXTERNAL_INTERRUPT]++;
...
@@ -19,6 +19,9 @@
#ifdef CONFIG_IP_MULTICAST
#include <net/arp.h>
#endif
#ifdef CONFIG_VIRT_TIMER
#include <asm/timer.h>
#endif
/*
* memory management
@@ -65,6 +68,17 @@ EXPORT_SYMBOL(overflowuid);
EXPORT_SYMBOL(overflowgid);
EXPORT_SYMBOL(empty_zero_page);
/*
* virtual CPU timer
*/
#ifdef CONFIG_VIRT_TIMER
EXPORT_SYMBOL(init_virt_timer);
EXPORT_SYMBOL(add_virt_timer);
EXPORT_SYMBOL(add_virt_timer_periodic);
EXPORT_SYMBOL(mod_virt_timer);
EXPORT_SYMBOL(del_virt_timer);
#endif
/*
* misc.
*/
@@ -77,5 +91,5 @@ EXPORT_SYMBOL(console_device);
EXPORT_SYMBOL_NOVERS(do_call_softirq);
EXPORT_SYMBOL(sys_wait4);
EXPORT_SYMBOL(cpcmd);
EXPORT_SYMBOL(smp_call_function_on);
EXPORT_SYMBOL(sys_ioctl);
@@ -30,6 +30,7 @@
#include <linux/delay.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <asm/sigp.h>
#include <asm/pgalloc.h>
@@ -65,7 +66,7 @@ extern char vmpoff_cmd[];
extern void do_reipl(unsigned long devno);
-static sigp_ccode smp_ext_bitcall(int, ec_bit_sig);
+static void smp_ext_bitcall(int, ec_bit_sig);
static void smp_ext_bitcall_others(ec_bit_sig);
/*
@@ -150,6 +151,59 @@ int smp_call_function (void (*func) (void *info), void *info, int nonatomic,
return 0;
}
/*
* Call a function on one CPU
* cpu : the CPU the function should be executed on
*
* You must not call this function with disabled interrupts or from a
* hardware interrupt handler. You may call it from a bottom half.
*
* It is guaranteed that the called function runs on the specified CPU
* with preemption disabled.
*/
int smp_call_function_on(void (*func) (void *info), void *info,
int nonatomic, int wait, int cpu)
{
struct call_data_struct data;
int curr_cpu;
if (!cpu_online(cpu))
return -EINVAL;
/* disable preemption for local function call */
curr_cpu = get_cpu();
if (curr_cpu == cpu) {
/* direct call to function */
func(info);
put_cpu();
return 0;
}
data.func = func;
data.info = info;
atomic_set(&data.started, 0);
data.wait = wait;
if (wait)
atomic_set(&data.finished, 0);
spin_lock_bh(&call_lock);
call_data = &data;
smp_ext_bitcall(cpu, ec_call_function);
/* Wait for response */
while (atomic_read(&data.started) != 1)
cpu_relax();
if (wait)
while (atomic_read(&data.finished) != 1)
cpu_relax();
spin_unlock_bh(&call_lock);
put_cpu();
return 0;
}
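For illustration only, a minimal sketch of how a caller might use the new interface; the helper names are hypothetical (not part of the patch), and __ctl_store() merely gives the callback something CPU-local to do:

/* hypothetical helper: read control register 0 on a given CPU */
static void read_cr0(void *info)
{
	/* runs on the target CPU with preemption disabled */
	__ctl_store(*(unsigned long *) info, 0, 0);
}

static unsigned long cr0_of_cpu(int cpu)
{
	unsigned long cr0 = 0;

	/* nonatomic=0, wait=1: block until read_cr0 has completed on cpu */
	smp_call_function_on(read_cr0, &cr0, 0, 1, cpu);
	return cr0;
}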
static inline void do_send_stop(void)
{
u32 dummy;
@@ -305,16 +359,14 @@ void do_ext_call_interrupt(struct pt_regs *regs, __u16 code)
* Send an external call sigp to another cpu and return without waiting
* for its completion.
*/
-static sigp_ccode smp_ext_bitcall(int cpu, ec_bit_sig sig)
+static void smp_ext_bitcall(int cpu, ec_bit_sig sig)
{
-sigp_ccode ccode;
/*
* Set signaling bit in lowcore of target cpu and kick it
*/
set_bit(sig, (unsigned long *) &lowcore_ptr[cpu]->ext_call_fast);
-ccode = signal_processor(cpu, sigp_external_call);
-return ccode;
+while (signal_processor(cpu, sigp_external_call) == sigp_busy)
+	udelay(10);
}
/*
...
@@ -24,16 +24,17 @@
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/types.h>
-#include <linux/timex.h>
-#include <linux/config.h>
#include <asm/uaccess.h>
#include <asm/delay.h>
#include <asm/s390_ext.h>
#include <asm/div64.h>
#include <linux/timex.h>
#include <linux/config.h>
#include <asm/irq.h>
#ifdef CONFIG_VIRT_TIMER
#include <asm/timer.h>
#endif
/* change this if you have some constant time drift */
#define USECS_PER_JIFFY ((unsigned long) 1000000/HZ)
@@ -51,13 +52,19 @@ u64 jiffies_64 = INITIAL_JIFFIES;
EXPORT_SYMBOL(jiffies_64);
-static ext_int_info_t ext_int_info_timer;
+static ext_int_info_t ext_int_info_cc;
static u64 init_timer_cc;
static u64 jiffies_timer_cc;
static u64 xtime_cc;
extern unsigned long wall_jiffies;
#ifdef CONFIG_VIRT_TIMER
#define VTIMER_MAGIC (0x4b87ad6e + 1)
static ext_int_info_t ext_int_info_timer;
DEFINE_PER_CPU(struct vtimer_queue, virt_cpu_timer);
#endif
/*
* Scheduler clock - returns current time in nanosec units.
*/
@@ -226,13 +233,208 @@ void account_ticks(struct pt_regs *regs)
#endif
}
#ifdef CONFIG_VIRT_TIMER
void start_cpu_timer(void)
{
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
set_vtimer(vt_list->idle);
}
int stop_cpu_timer(void)
{
__u64 done;
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
/* nothing to do */
if (list_empty(&vt_list->list)) {
vt_list->idle = VTIMER_MAX_SLICE;
goto fire;
}
/* store progress */
asm volatile ("STPT %0" : "=m" (done));
/*
* If done is negative we do not stop the CPU timer
* because we will instantly get an interrupt that
* will start the CPU timer again.
*/
if (done & 1LL<<63)
return 1;
else
vt_list->offset += vt_list->to_expire - done;
/* save the remaining expire value */
vt_list->idle = done;
/*
* We cannot halt the CPU timer, so we just write a value that
* nearly never expires (only after 71 years) and rewrite
* the stored expire value when we resume the timer
*/
fire:
set_vtimer(VTIMER_MAX_SLICE);
return 0;
}
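As a sanity check on the "71 years" mentioned above, assuming the CPU timer uses the same format as the TOD clock (bit 51 incremented once per microsecond, i.e. 4096 units per microsecond), a full 2^63 slice lasts

\[ \frac{2^{63}\ \text{units}}{4096 \times 10^{6}\ \text{units/s}} \approx 2.25 \times 10^{9}\ \text{s} \approx 71.4\ \text{years}, \]

which is why VTIMER_MAX_SLICE (0x7ffffffffffff000, just below 2^63) is treated as "nearly never expires".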
void do_monitor_call(struct pt_regs *regs, long interruption_code)
{
/* disable monitor call class 0 */
__ctl_clear_bit(8, 15);
start_cpu_timer();
}
/*
* called from cpu_idle to stop any timers
* returns 1 if CPU should not be stopped
*/
int stop_timers(void)
{
if (stop_cpu_timer())
return 1;
/* enable monitor call class 0 */
__ctl_set_bit(8, 15);
return 0;
}
void set_vtimer(__u64 expires)
{
asm volatile ("SPT %0" : : "m" (expires));
/* store expire time for this CPU timer */
per_cpu(virt_cpu_timer, smp_processor_id()).to_expire = expires;
}
/*
* Sorted add to a list. The list is searched linearly until the
* first element with a later expiry is found.
*/
void list_add_sorted(struct vtimer_list *timer, struct list_head *head)
{
struct vtimer_list *event;
list_for_each_entry(event, head, entry) {
if (event->expires > timer->expires) {
list_add_tail(&timer->entry, &event->entry);
return;
}
}
list_add_tail(&timer->entry, head);
}
/*
* Do the callback functions of expired vtimer events.
* Called from within the interrupt handler.
*/
static void do_callbacks(struct list_head *cb_list, struct pt_regs *regs)
{
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
void (*fn)(unsigned long, struct pt_regs*);
unsigned long data;
if (list_empty(cb_list))
return;
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
list_for_each_entry_safe(event, tmp, cb_list, entry) {
fn = event->function;
data = event->data;
fn(data, regs);
if (!event->interval)
/* delete one shot timer */
list_del_init(&event->entry);
else {
/* move interval timer back to list */
spin_lock(&vt_list->lock);
list_del_init(&event->entry);
list_add_sorted(event, &vt_list->list);
spin_unlock(&vt_list->lock);
}
}
}
/*
* Handler for the virtual CPU timer.
*/
static void do_cpu_timer_interrupt(struct pt_regs *regs, __u16 error_code)
{
int cpu;
__u64 next, delta;
struct vtimer_queue *vt_list;
struct vtimer_list *event, *tmp;
struct list_head *ptr;
/* the callback queue */
struct list_head cb_list;
INIT_LIST_HEAD(&cb_list);
cpu = smp_processor_id();
vt_list = &per_cpu(virt_cpu_timer, cpu);
/* walk timer list, fire all expired events */
spin_lock(&vt_list->lock);
if (vt_list->to_expire < VTIMER_MAX_SLICE)
vt_list->offset += vt_list->to_expire;
list_for_each_entry_safe(event, tmp, &vt_list->list, entry) {
if (event->expires > vt_list->offset)
/* found first unexpired event, leave */
break;
/* re-charge interval timer, we have to add the offset */
if (event->interval)
event->expires = event->interval + vt_list->offset;
/* move expired timer to the callback queue */
list_move_tail(&event->entry, &cb_list);
}
spin_unlock(&vt_list->lock);
do_callbacks(&cb_list, regs);
/* next event is first in list */
spin_lock(&vt_list->lock);
if (!list_empty(&vt_list->list)) {
ptr = vt_list->list.next;
event = list_entry(ptr, struct vtimer_list, entry);
next = event->expires - vt_list->offset;
/* add the expired time from this interrupt handler
* and the callback functions
*/
asm volatile ("STPT %0" : "=m" (delta));
delta = 0xffffffffffffffffLL - delta + 1;
vt_list->offset += delta;
next -= delta;
} else {
vt_list->offset = 0;
next = VTIMER_MAX_SLICE;
}
spin_unlock(&vt_list->lock);
set_vtimer(next);
}
#endif
/*
- * Start the clock comparator on the current CPU.
+ * Start the clock comparator and the virtual CPU timer
+ * on the current CPU.
*/
void init_cpu_timer(void)
{
unsigned long cr0;
__u64 timer;
#ifdef CONFIG_VIRT_TIMER
struct vtimer_queue *vt_list;
#endif
timer = jiffies_timer_cc + jiffies_64 * CLK_TICKS_PER_JIFFY;
S390_lowcore.jiffy_timer = timer + CLK_TICKS_PER_JIFFY;
@@ -242,6 +444,22 @@ void init_cpu_timer(void)
__ctl_store(cr0, 0, 0);
cr0 |= 0x800;
__ctl_load(cr0, 0, 0);
#ifdef CONFIG_VIRT_TIMER
/* kick the virtual timer */
timer = VTIMER_MAX_SLICE;
asm volatile ("SPT %0" : : "m" (timer));
__ctl_store(cr0, 0, 0);
cr0 |= 0x400;
__ctl_load(cr0, 0, 0);
vt_list = &per_cpu(virt_cpu_timer, smp_processor_id());
INIT_LIST_HEAD(&vt_list->list);
spin_lock_init(&vt_list->lock);
vt_list->to_expire = 0;
vt_list->offset = 0;
vt_list->idle = 0;
#endif
}
/*
@@ -281,11 +499,252 @@ void __init time_init(void)
set_normalized_timespec(&wall_to_monotonic,
-xtime.tv_sec, -xtime.tv_nsec);
-/* request the 0x1004 external interrupt */
+/* request the clock comparator external interrupt */
if (register_early_external_interrupt(0x1004, 0,
-&ext_int_info_timer) != 0)
+&ext_int_info_cc) != 0)
panic("Couldn't request external interrupt 0x1004");
-/* init CPU timer */
+#ifdef CONFIG_VIRT_TIMER
/* request the cpu timer external interrupt */
if (register_early_external_interrupt(0x1005, do_cpu_timer_interrupt,
&ext_int_info_timer) != 0)
panic("Couldn't request external interrupt 0x1005");
#endif
init_cpu_timer();
}
#ifdef CONFIG_VIRT_TIMER
void init_virt_timer(struct vtimer_list *timer)
{
timer->magic = VTIMER_MAGIC;
timer->function = NULL;
INIT_LIST_HEAD(&timer->entry);
spin_lock_init(&timer->lock);
}
static inline int check_vtimer(struct vtimer_list *timer)
{
if (timer->magic != VTIMER_MAGIC)
return -EINVAL;
return 0;
}
static inline int vtimer_pending(struct vtimer_list *timer)
{
return (!list_empty(&timer->entry));
}
/*
* this function should only run on the specified CPU
*/
static void internal_add_vtimer(struct vtimer_list *timer)
{
unsigned long flags;
__u64 done;
struct vtimer_list *event;
struct vtimer_queue *vt_list;
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags);
if (timer->cpu != smp_processor_id())
printk("internal_add_vtimer: BUG, running on wrong CPU");
/* if list is empty we only have to set the timer */
if (list_empty(&vt_list->list)) {
/* reset the offset; this may happen if the last timer was
* just deleted by mod_virt_timer and the interrupt
* hasn't fired since then
*/
vt_list->offset = 0;
goto fire;
}
/* save progress */
asm volatile ("STPT %0" : "=m" (done));
/* calculate completed work */
done = vt_list->to_expire - done + vt_list->offset;
vt_list->offset = 0;
list_for_each_entry(event, &vt_list->list, entry)
event->expires -= done;
fire:
list_add_sorted(timer, &vt_list->list);
/* get first element, which is the next vtimer slice */
event = list_entry(vt_list->list.next, struct vtimer_list, entry);
set_vtimer(event->expires);
spin_unlock_irqrestore(&vt_list->lock, flags);
/* release CPU acquired in prepare_vtimer or mod_virt_timer() */
put_cpu();
}
static inline int prepare_vtimer(struct vtimer_list *timer)
{
if (check_vtimer(timer) || !timer->function) {
printk("add_virt_timer: uninitialized timer\n");
return -EINVAL;
}
if (!timer->expires || timer->expires > VTIMER_MAX_SLICE) {
printk("add_virt_timer: invalid timer expire value!\n");
return -EINVAL;
}
if (vtimer_pending(timer)) {
printk("add_virt_timer: timer pending\n");
return -EBUSY;
}
timer->cpu = get_cpu();
return 0;
}
/*
* add_virt_timer - add a one-shot virtual CPU timer
*/
void add_virt_timer(void *new)
{
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
if (prepare_vtimer(timer) < 0)
return;
timer->interval = 0;
internal_add_vtimer(timer);
}
/*
* add_virt_timer_periodic - add an interval virtual CPU timer
*/
void add_virt_timer_periodic(void *new)
{
struct vtimer_list *timer;
timer = (struct vtimer_list *)new;
if (prepare_vtimer(timer) < 0)
return;
timer->interval = timer->expires;
internal_add_vtimer(timer);
}
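To illustrate the interface added above, a minimal sketch that arms a periodic virtual timer; the callback, data cookie, and expiry value are invented for the example (expiry is in CPU-timer units, not jiffies):

#include <asm/timer.h>

/* hypothetical callback, run from the 0x1005 interrupt via do_callbacks() */
static void my_vtimer_fn(unsigned long data, struct pt_regs *regs)
{
	printk("vtimer expired, data=%lu\n", data);
}

static struct vtimer_list my_timer;

static void start_my_timer(void)
{
	init_virt_timer(&my_timer);
	my_timer.function = my_vtimer_fn;
	my_timer.data = 42;			/* arbitrary cookie */
	my_timer.expires = 0x40000000ULL;	/* hypothetical, must be <= VTIMER_MAX_SLICE */
	add_virt_timer_periodic(&my_timer);	/* interval is set to expires */
}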
/*
* When changing a pending timer, this function must be called on the CPU
* where the timer runs, e.g. via smp_call_function_on().
*
* The original mod_timer adds the timer if it is not pending. For
* compatibility we do the same. The timer will be added on the current
* CPU as a one-shot timer.
*
* returns whether it has modified a pending timer (1) or not (0)
*/
int mod_virt_timer(struct vtimer_list *timer, __u64 expires)
{
struct vtimer_queue *vt_list;
unsigned long flags;
int cpu;
if (check_vtimer(timer) || !timer->function) {
printk("mod_virt_timer: uninitialized timer\n");
return -EINVAL;
}
if (!expires || expires > VTIMER_MAX_SLICE) {
printk("mod_virt_timer: invalid expire range\n");
return -EINVAL;
}
/*
* This is a common optimization triggered by the
* networking code - if the timer is re-modified
* to be the same thing then just return:
*/
if (timer->expires == expires && vtimer_pending(timer))
return 1;
cpu = get_cpu();
vt_list = &per_cpu(virt_cpu_timer, cpu);
/* disable interrupts before test if timer is pending */
spin_lock_irqsave(&vt_list->lock, flags);
/* if timer isn't pending add it on the current CPU */
if (!vtimer_pending(timer)) {
spin_unlock_irqrestore(&vt_list->lock, flags);
/* we do not activate an interval timer with mod_virt_timer */
timer->interval = 0;
timer->expires = expires;
timer->cpu = cpu;
internal_add_vtimer(timer);
return 0;
}
/* check if we run on the right CPU */
if (timer->cpu != cpu) {
printk("mod_virt_timer: running on wrong CPU, check your code\n");
spin_unlock_irqrestore(&vt_list->lock, flags);
put_cpu();
return -EINVAL;
}
list_del_init(&timer->entry);
timer->expires = expires;
/* also change the interval if we have an interval timer */
if (timer->interval)
timer->interval = expires;
/* the timer can't expire anymore so we can release the lock */
spin_unlock_irqrestore(&vt_list->lock, flags);
internal_add_vtimer(timer);
return 1;
}
/*
* delete a virtual timer
*
* returns whether the deleted timer was pending (1) or not (0)
*/
int del_virt_timer(struct vtimer_list *timer)
{
unsigned long flags;
struct vtimer_queue *vt_list;
if (check_vtimer(timer)) {
printk("del_virt_timer: timer not initialized\n");
return -EINVAL;
}
/* check if timer is pending */
if (!vtimer_pending(timer))
return 0;
if (!cpu_online(timer->cpu)) {
printk("del_virt_timer: CPU not present!\n");
return -1;
}
vt_list = &per_cpu(virt_cpu_timer, timer->cpu);
spin_lock_irqsave(&vt_list->lock, flags);
/* we don't interrupt a running timer, just let it expire! */
list_del_init(&timer->entry);
/* last timer removed */
if (list_empty(&vt_list->list)) {
vt_list->to_expire = 0;
vt_list->offset = 0;
}
spin_unlock_irqrestore(&vt_list->lock, flags);
return 1;
}
#endif
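Continuing the hypothetical sketch from add_virt_timer_periodic above: because mod_virt_timer() on a pending timer must run on the timer's own CPU, a caller elsewhere can route it through smp_call_function_on(); del_virt_timer() works from any CPU:

static void rearm_my_timer(void *info)
{
	/* runs on my_timer.cpu, as mod_virt_timer requires for pending timers */
	mod_virt_timer(&my_timer, *(__u64 *) info);
}

static void change_then_stop(void)
{
	__u64 new_slice = 0x20000000ULL;	/* hypothetical new expiry */

	smp_call_function_on(rearm_my_timer, &new_slice, 0, 1, my_timer.cpu);
	del_virt_timer(&my_timer);	/* returns 1 if it was still pending */
}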
@@ -64,6 +64,9 @@ extern void pfault_fini(void);
extern void pfault_interrupt(struct pt_regs *regs, __u16 error_code);
static ext_int_info_t ext_int_pfault;
#endif
#ifdef CONFIG_VIRT_TIMER
extern pgm_check_handler_t do_monitor_call;
#endif
#define stack_pointer ({ void **sp; asm("la %0,0(15)" : "=&d" (sp)); sp; })
@@ -625,6 +628,9 @@ void __init trap_init(void)
#endif /* CONFIG_ARCH_S390X */
pgm_check_table[0x15] = &operand_exception;
pgm_check_table[0x1C] = &privileged_op;
#ifdef CONFIG_VIRT_TIMER
pgm_check_table[0x40] = &do_monitor_call;
#endif
if (MACHINE_IS_VM) {
/*
* First try to get pfault pseudo page faults going.
...
@@ -607,6 +607,7 @@ do_IRQ (struct pt_regs *regs)
struct irb *irb;
irq_enter ();
asm volatile ("mc 0,0");
if (S390_lowcore.int_clock >= S390_lowcore.jiffy_timer)
account_ticks(regs);
/*
...
@@ -29,6 +29,9 @@ typedef struct
__u16 cpu;
} sigp_info;
extern int smp_call_function_on(void (*func) (void *info), void *info,
int nonatomic, int wait, int cpu);
extern cpumask_t cpu_online_map;
extern cpumask_t cpu_possible_map;
@@ -61,4 +64,9 @@ extern __inline__ __u16 hard_smp_processor_id(void)
#define cpu_logical_map(cpu) (cpu)
#endif
#ifndef CONFIG_SMP
#define smp_call_function_on(func,info,nonatomic,wait,cpu) ({ 0; })
#endif
#endif
/*
* include/asm-s390/timer.h
*
* (C) Copyright IBM Corp. 2003
* Virtual CPU timer
*
* Author: Jan Glauber (jang@de.ibm.com)
*/
#ifndef _ASM_S390_TIMER_H
#define _ASM_S390_TIMER_H
#include <linux/timer.h>
#define VTIMER_MAX_SLICE (0x7ffffffffffff000LL)
struct vtimer_list {
struct list_head entry;
int cpu;
__u64 expires;
__u64 interval;
spinlock_t lock;
unsigned long magic;
void (*function)(unsigned long, struct pt_regs*);
unsigned long data;
};
/* the offset value will wrap after ca. 71 years */
struct vtimer_queue {
struct list_head list;
spinlock_t lock;
__u64 to_expire; /* current event expire time */
__u64 offset; /* list offset to zero */
__u64 idle; /* temp var for idle */
};
void set_vtimer(__u64 expires);
extern void init_virt_timer(struct vtimer_list *timer);
extern void add_virt_timer(void *new);
extern void add_virt_timer_periodic(void *new);
extern int mod_virt_timer(struct vtimer_list *timer, __u64 expires);
extern int del_virt_timer(struct vtimer_list *timer);
int stop_timers(void);
#endif