Commit 3acfe790 authored by David S. Miller, committed by David S. Miller

Sparc64 preemption support.

parent 1345a9e6
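
In outline, the patch works as follows: struct thread_info gains a preempt_count field (with a TI_PRE_COUNT asm offset); trap entry via etrap_irq increments the count and trap return via rtrap_irq decrements it; the return-to-kernel path in rtrap.S calls the new kpreempt_maybe() to reschedule when the count permits; the arch spinlock and rwlock primitives are renamed to _raw_* so that generic preempt-aware wrappers can own the plain names; and the per-processor page-table quicklists get explicit preempt_disable()/preempt_enable() brackets. A rough C rendering of the trap-path discipline (kpreempt_maybe(), preempt_schedule() and TI_PRE_COUNT are from the patch; trap_entry()/trap_return_to_kernel() are hypothetical stand-ins for the assembly paths):

	/* Sketch only: C rendering of the etrap_irq/rtrap.S logic below. */
	void trap_entry(void)				/* etrap_irq */
	{
		current_thread_info()->preempt_count++;
	}

	void trap_return_to_kernel(void)		/* rtrap.S to_kernel */
	{
		struct thread_info *ti = current_thread_info();

		if (ti->preempt_count == 0) {
			ti->preempt_count++;	/* bar recursive preemption */
			local_irq_enable();	/* wrpr RTRAP_PSTATE */
			kpreempt_maybe();	/* preempt_schedule() unless in
						   irq/bh; drops the count */
			local_irq_disable();	/* wrpr RTRAP_PSTATE_IRQOFF */
			ti->preempt_count = 0;	/* restore saved value */
		}
		/* then the usual FPU depth check and register restore */
	}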
@@ -713,8 +713,8 @@ floppy_dosoftint:
 	call	sparc_floppy_irq
 	add	%sp, STACK_BIAS + REGWIN_SZ, %o2
-	b,pt	%xcc, rtrap
-	clr	%l6
+	b,pt	%xcc, rtrap_irq
+	nop
 #endif /* CONFIG_BLK_DEV_FD */
@@ -883,7 +883,7 @@ cee_trap:
 	mov	%l5, %o1
 	call	cee_log
 	add	%sp, STACK_BIAS + REGWIN_SZ, %o2
-	ba,a,pt	%xcc, rtrap_clr_l6
+	ba,a,pt	%xcc, rtrap_irq

 /* Capture I/D/E-cache state into per-cpu error scoreboard.
  *
@@ -1109,7 +1109,7 @@ cheetah_fast_ecc:
 	mov	%l5, %o2
 	call	cheetah_fecc_handler
 	add	%sp, STACK_BIAS + REGWIN_SZ, %o0
-	ba,a,pt	%xcc, rtrap_clr_l6
+	ba,a,pt	%xcc, rtrap_irq

 /* Our caller has disabled I-cache and performed membar Sync. */
 	.globl	cheetah_cee
@@ -1135,7 +1135,7 @@ cheetah_cee:
 	mov	%l5, %o2
 	call	cheetah_cee_handler
 	add	%sp, STACK_BIAS + REGWIN_SZ, %o0
-	ba,a,pt	%xcc, rtrap_clr_l6
+	ba,a,pt	%xcc, rtrap_irq

 /* Our caller has disabled I-cache+D-cache and performed membar Sync. */
 	.globl	cheetah_deferred_trap
@@ -1161,7 +1161,7 @@ cheetah_deferred_trap:
 	mov	%l5, %o2
 	call	cheetah_deferred_handler
 	add	%sp, STACK_BIAS + REGWIN_SZ, %o0
-	ba,a,pt	%xcc, rtrap_clr_l6
+	ba,a,pt	%xcc, rtrap_irq

 	.globl	__do_privact
 __do_privact:
@@ -5,6 +5,8 @@
  * Copyright (C) 1997, 1998, 1999 Jakub Jelinek (jj@ultra.linux.cz)
  */

+#include <linux/config.h>
 #include <asm/asi.h>
 #include <asm/pstate.h>
 #include <asm/ptrace.h>
@@ -25,9 +27,17 @@
 	.text
 	.align	64
 	.globl	etrap, etrap_irq, etraptl1
+#ifdef CONFIG_PREEMPT
+etrap_irq:	ldsw	[%g6 + TI_PRE_COUNT], %g1
+	add	%g1, 1, %g1
+	ba,pt	%xcc, etrap_irq2
+	stw	%g1, [%g6 + TI_PRE_COUNT]
+#endif
 etrap:	rdpr	%pil, %g2	! Single Group
-etrap_irq:	rdpr	%tstate, %g1	! Single Group
+#ifndef CONFIG_PREEMPT
+etrap_irq:
+#endif
+etrap_irq2:	rdpr	%tstate, %g1	! Single Group
 	sllx	%g2, 20, %g3	! IEU0 Group
 	andcc	%g1, TSTATE_PRIV, %g0	! IEU1
 	or	%g1, %g3, %g1	! IEU0 Group
@@ -100,6 +100,18 @@ int cpu_idle(void)
 #endif

+#ifdef CONFIG_PREEMPT
+void kpreempt_maybe(void)
+{
+	int cpu = smp_processor_id();
+
+	if (local_irq_count(cpu) == 0 &&
+	    local_bh_count(cpu) == 0)
+		preempt_schedule();
+	current_thread_info()->preempt_count--;
+}
+#endif
+
 extern char reboot_command [];

 #ifdef CONFIG_SUN_CONSOLE
@@ -5,6 +5,8 @@
  * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
  */

+#include <linux/config.h>
 #include <asm/asi.h>
 #include <asm/pstate.h>
 #include <asm/ptrace.h>
@@ -148,7 +150,13 @@ __handle_signal:
 	andn	%l1, %l4, %l1

 	.align	64
-	.globl	rtrap_clr_l6, rtrap, irqsz_patchme
+	.globl	rtrap_irq, rtrap_clr_l6, rtrap, irqsz_patchme
+rtrap_irq:
+#ifdef CONFIG_PREEMPT
+	ldsw	[%g6 + TI_PRE_COUNT], %l0
+	sub	%l0, 1, %l0
+	stw	%l0, [%g6 + TI_PRE_COUNT]
+#endif
 rtrap_clr_l6:	clr	%l6
 rtrap:	ldub	[%g6 + TI_CPU], %l0
 	sethi	%hi(irq_stat), %l2	! &softirq_active
@@ -261,7 +269,18 @@ rt_continue: ldx [%sp + PTREGS_OFF + PT_V9_G1], %g1
 kern_rtt:	restore
 	retry

-to_kernel:	ldub	[%g6 + TI_FPDEPTH], %l5
+to_kernel:
+#ifdef CONFIG_PREEMPT
+	ldsw	[%g6 + TI_PRE_COUNT], %l5
+	brnz	%l5, kern_fpucheck
+	add	%l5, 1, %l6
+	stw	%l6, [%g6 + TI_PRE_COUNT]
+	call	kpreempt_maybe
+	wrpr	%g0, RTRAP_PSTATE, %pstate
+	wrpr	%g0, RTRAP_PSTATE_IRQOFF, %pstate
+	stw	%l5, [%g6 + TI_PRE_COUNT]
+#endif
+kern_fpucheck:	ldub	[%g6 + TI_FPDEPTH], %l5
 	brz,pt	%l5, rt_continue
 	srl	%l5, 1, %o0
 	add	%g6, TI_FPSAVED, %l6
@@ -902,7 +902,7 @@ void smp_migrate_task(int cpu, task_t *p)
 	if (smp_processors_ready && (cpu_present_map & mask) != 0) {
 		u64 data0 = (((u64)&xcall_migrate_task) & 0xffffffff);

-		spin_lock(&migration_lock);
+		_raw_spin_lock(&migration_lock);
 		new_task = p;

 		if (tlb_type == spitfire)
@@ -923,7 +923,7 @@ asmlinkage void smp_task_migration_interrupt(int irq, struct pt_regs *regs)
 	clear_softint(1 << irq);

 	p = new_task;
-	spin_unlock(&migration_lock);
+	_raw_spin_unlock(&migration_lock);
 	sched_task_migrated(p);
 }
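
These two hunks pair up: migration_lock is taken in smp_migrate_task() on the sending CPU and released in smp_task_migration_interrupt() on the receiving one. Once the generic spin_lock()/spin_unlock() fold in preemption bookkeeping, such a split would leave the sender's preempt_count permanently raised and drive the receiver's negative, hence the raw primitives. A sketch (the function names here are illustrative wrappers around the patch's real call sites):

	/* Illustrative only: lock and unlock run in different contexts,
	 * on different CPUs, so the preemption accounting of the generic
	 * wrappers must be avoided. */
	extern spinlock_t migration_lock;

	void sender_side(void)		/* cf. smp_migrate_task() */
	{
		_raw_spin_lock(&migration_lock);	/* no preempt_disable() */
		/* queue the task, fire xcall_migrate_task at the target CPU */
	}

	void receiver_side(void)	/* cf. smp_task_migration_interrupt() */
	{
		/* runs in trap context on the target CPU */
		_raw_spin_unlock(&migration_lock);	/* no preempt_enable() */
	}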
@@ -1700,6 +1700,7 @@ void trap_init(void)
 	    TI_KERN_CNTD1 != offsetof(struct thread_info, kernel_cntd1) ||
 	    TI_PCR != offsetof(struct thread_info, pcr_reg) ||
 	    TI_CEE_STUFF != offsetof(struct thread_info, cee_stuff) ||
+	    TI_PRE_COUNT != offsetof(struct thread_info, preempt_count) ||
 	    TI_FPREGS != offsetof(struct thread_info, fpregs) ||
 	    (TI_FPREGS & (64 - 1)))
 		thread_info_offsets_are_bolixed_dave();
@@ -18,6 +18,10 @@
 /* On entry: %o5=current FPRS value, %g7 is callers address */
 /* May clobber %o5, %g1, %g2, %g3, %g7, %icc, %xcc */

+/* Nothing special need be done here to handle pre-emption, this
+ * FPU save/restore mechanism is already preemption safe.
+ */
+
 	.align	32
 VISenter:
 	ldub	[%g6 + TI_FPDEPTH], %g1
@@ -493,8 +493,8 @@ xcall_report_regs:
 109:	or	%g7, %lo(109b), %g7
 	call	__show_regs
 	add	%sp, STACK_BIAS + REGWIN_SZ, %o0
-	b,pt	%xcc, rtrap
-	clr	%l6
+	b,pt	%xcc, rtrap_irq
+	nop

 	.align	32
 	.globl	xcall_flush_dcache_page_cheetah
@@ -554,8 +554,8 @@ xcall_capture:
 109:	or	%g7, %lo(109b), %g7
 	call	smp_penguin_jailcell
 	nop
-	b,pt	%xcc, rtrap
-	clr	%l6
+	b,pt	%xcc, rtrap_irq
+	nop

 	.globl	xcall_promstop
 xcall_promstop:
@@ -681,8 +681,8 @@ xcall_call_function:
 109:	or	%g7, %lo(109b), %g7
 	call	smp_call_function_client
 	nop
-	b,pt	%xcc, rtrap
-	clr	%l6
+	b,pt	%xcc, rtrap_irq
+	nop

 	.globl	xcall_migrate_task
 xcall_migrate_task:
@@ -158,6 +158,7 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
 {
 	struct page *page = virt_to_page(pgd);

+	preempt_disable();
 	if (!page->pprev_hash) {
 		(unsigned long *)page->next_hash = pgd_quicklist;
 		pgd_quicklist = (unsigned long *)page;
@@ -165,12 +166,14 @@ extern __inline__ void free_pgd_fast(pgd_t *pgd)
 	(unsigned long)page->pprev_hash |=
 		(((unsigned long)pgd & (PAGE_SIZE / 2)) ? 2 : 1);
 	pgd_cache_size++;
+	preempt_enable();
 }

 extern __inline__ pgd_t *get_pgd_fast(void)
 {
 	struct page *ret;

+	preempt_disable();
 	if ((ret = (struct page *)pgd_quicklist) != NULL) {
 		unsigned long mask = (unsigned long)ret->pprev_hash;
 		unsigned long off = 0;
@@ -186,16 +189,22 @@ extern __inline__ pgd_t *get_pgd_fast(void)
 		pgd_quicklist = (unsigned long *)ret->next_hash;
 		ret = (struct page *)(__page_address(ret) + off);
 		pgd_cache_size--;
+		preempt_enable();
 	} else {
-		struct page *page = alloc_page(GFP_KERNEL);
+		struct page *page;
+
+		preempt_enable();
+		page = alloc_page(GFP_KERNEL);
 		if (page) {
 			ret = (struct page *)page_address(page);
 			clear_page(ret);
 			(unsigned long)page->pprev_hash = 2;
+
+			preempt_disable();
 			(unsigned long *)page->next_hash = pgd_quicklist;
 			pgd_quicklist = (unsigned long *)page;
 			pgd_cache_size++;
+			preempt_enable();
 		}
 	}

 	return (pgd_t *)ret;
@@ -205,20 +214,25 @@ extern __inline__ pgd_t *get_pgd_fast(void)
 extern __inline__ void free_pgd_fast(pgd_t *pgd)
 {
+	preempt_disable();
 	*(unsigned long *)pgd = (unsigned long) pgd_quicklist;
 	pgd_quicklist = (unsigned long *) pgd;
 	pgtable_cache_size++;
+	preempt_enable();
 }

 extern __inline__ pgd_t *get_pgd_fast(void)
 {
 	unsigned long *ret;

+	preempt_disable();
 	if((ret = pgd_quicklist) != NULL) {
 		pgd_quicklist = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
+		preempt_enable();
 	} else {
+		preempt_enable();
 		ret = (unsigned long *) __get_free_page(GFP_KERNEL);
 		if(ret)
 			memset(ret, 0, PAGE_SIZE);
@@ -258,20 +272,27 @@ extern __inline__ pmd_t *pmd_alloc_one_fast(struct mm_struct *mm, unsigned long
 	if (pte_quicklist[color] == NULL)
 		color = 1;

+	preempt_disable();
 	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
 		pte_quicklist[color] = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
+	preempt_enable();

 	return (pmd_t *)ret;
 }

 extern __inline__ void free_pmd_fast(pmd_t *pmd)
 {
 	unsigned long color = DCACHE_COLOR((unsigned long)pmd);

+	preempt_disable();
 	*(unsigned long *)pmd = (unsigned long) pte_quicklist[color];
 	pte_quicklist[color] = (unsigned long *) pmd;
 	pgtable_cache_size++;
+	preempt_enable();
 }

 extern __inline__ void free_pmd_slow(pmd_t *pmd)
@@ -288,20 +309,25 @@ extern __inline__ pte_t *pte_alloc_one_fast(struct mm_struct *mm, unsigned long
 	unsigned long color = VPTE_COLOR(address);
 	unsigned long *ret;

+	preempt_disable();
 	if((ret = (unsigned long *)pte_quicklist[color]) != NULL) {
 		pte_quicklist[color] = (unsigned long *)(*ret);
 		ret[0] = 0;
 		pgtable_cache_size--;
 	}
+	preempt_enable();

 	return (pte_t *)ret;
 }

 extern __inline__ void free_pte_fast(pte_t *pte)
 {
 	unsigned long color = DCACHE_COLOR((unsigned long)pte);

+	preempt_disable();
 	*(unsigned long *)pte = (unsigned long) pte_quicklist[color];
 	pte_quicklist[color] = (unsigned long *) pte;
 	pgtable_cache_size++;
+	preempt_enable();
 }

 extern __inline__ void free_pte_slow(pte_t *pte)
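
The pattern in the quicklist hunks above is uniform: the pgd/pte quicklists are unlocked per-processor caches, so a preemption (and possible CPU migration) in the middle of a list update would corrupt them. Every list manipulation is therefore bracketed by preempt_disable()/preempt_enable(), and the brackets are dropped before any allocation that can sleep. Condensed to its shape (quicklist_pop() and its head argument are illustrative names, not patch code):

	/* Sketch of the preempt-safe quicklist pattern used above. */
	static unsigned long *quicklist_pop(unsigned long **head)
	{
		unsigned long *ret;

		preempt_disable();		/* pin this CPU's list */
		if ((ret = *head) != NULL) {
			*head = (unsigned long *)*ret;
			ret[0] = 0;
			preempt_enable();
			return ret;
		}
		preempt_enable();		/* GFP_KERNEL may sleep */
		ret = (unsigned long *)__get_free_page(GFP_KERNEL);
		if (ret)
			memset(ret, 0, PAGE_SIZE);
		return ret;
	}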
@@ -9,9 +9,17 @@
 extern spinlock_t kernel_flag;

 #ifdef CONFIG_SMP
 #define kernel_locked()			\
 	(spin_is_locked(&kernel_flag) &&\
	 (current->lock_depth >= 0))
 #else
+#ifdef CONFIG_PREEMPT
+#define kernel_locked()		preempt_get_count()
+#else
 #define kernel_locked()		1
+#endif
 #endif

 /*
  * Release global kernel lock and global interrupt lock
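
With CONFIG_PREEMPT on a uniprocessor kernel, lock_kernel() goes through the real BKL path, and a UP spin_lock() under preemption reduces to little more than a preempt_disable(); a nonzero preemption count is therefore the closest available proxy for "big kernel lock held", which is what kernel_locked() tests via preempt_get_count(). A sketch of the UP pairing this definition assumes (illustrative, not part of this patch):

	/* Hypothetical UP CONFIG_PREEMPT view of the BKL: taking it just
	 * pins preemption, so the preemption count doubles as the
	 * kernel_locked() indicator used above. */
	#define lock_kernel_up()	preempt_disable()
	#define unlock_kernel_up()	preempt_enable()
	#define kernel_locked_up()	preempt_get_count()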
@@ -10,14 +10,15 @@
 #include <asm/hardirq.h>
 #include <asm/system.h>	/* for membar() */

-#define local_bh_disable()	(local_bh_count(smp_processor_id())++)
-#define __local_bh_enable()	(local_bh_count(smp_processor_id())--)
+#define local_bh_disable()	do { barrier(); preempt_disable(); local_bh_count(smp_processor_id())++; } while (0)
+#define __local_bh_enable()	do { local_bh_count(smp_processor_id())--; preempt_enable(); barrier(); } while (0)
 #define local_bh_enable()				\
 do { if (!--local_bh_count(smp_processor_id()) &&	\
 	     softirq_pending(smp_processor_id())) {	\
 		do_softirq();				\
 		__sti();				\
 	}						\
+	preempt_enable();				\
 } while (0)

 #define in_softirq() (local_bh_count(smp_processor_id()) != 0)
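
Note the ordering in these macros: preemption goes off before the per-cpu bottom-half count is touched, so smp_processor_id() cannot change under the increment, and it comes back on only after the count drops and any pending softirqs have run. Expanded as plain functions (a sketch of the macros above, not new code):

	/* Sketch: expansion of local_bh_disable()/local_bh_enable(). */
	static inline void local_bh_disable_sketch(void)
	{
		barrier();
		preempt_disable();			/* CPU now pinned */
		local_bh_count(smp_processor_id())++;
	}

	static inline void local_bh_enable_sketch(void)
	{
		int cpu = smp_processor_id();

		if (!--local_bh_count(cpu) && softirq_pending(cpu)) {
			do_softirq();
			__sti();
		}
		preempt_enable();			/* may reschedule here */
	}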
@@ -40,7 +40,7 @@ typedef unsigned char spinlock_t;
 do {	membar("#LoadLoad");	\
 } while(*((volatile unsigned char *)lock))

-extern __inline__ void spin_lock(spinlock_t *lock)
+extern __inline__ void _raw_spin_lock(spinlock_t *lock)
 {
 	__asm__ __volatile__(
 "1:	ldstub	[%0], %%g7\n"
@@ -57,7 +57,7 @@ extern __inline__ void spin_lock(spinlock_t *lock)
 	: "g7", "memory");
 }

-extern __inline__ int spin_trylock(spinlock_t *lock)
+extern __inline__ int _raw_spin_trylock(spinlock_t *lock)
 {
 	unsigned int result;

 	__asm__ __volatile__("ldstub [%1], %0\n\t"
@@ -68,7 +68,7 @@ extern __inline__ int spin_trylock(spinlock_t *lock)
 	return (result == 0);
 }

-extern __inline__ void spin_unlock(spinlock_t *lock)
+extern __inline__ void _raw_spin_unlock(spinlock_t *lock)
 {
 	__asm__ __volatile__("membar	#StoreStore | #LoadStore\n\t"
 			     "stb	%%g0, [%0]"
@@ -99,9 +99,9 @@ extern void _do_spin_lock (spinlock_t *lock, char *str);
 extern void _do_spin_unlock (spinlock_t *lock);
 extern int _spin_trylock (spinlock_t *lock);

-#define spin_trylock(lp)	_spin_trylock(lp)
-#define spin_lock(lock)		_do_spin_lock(lock, "spin_lock")
-#define spin_unlock(lock)	_do_spin_unlock(lock)
+#define _raw_spin_trylock(lp)	_spin_trylock(lp)
+#define _raw_spin_lock(lock)	_do_spin_lock(lock, "spin_lock")
+#define _raw_spin_unlock(lock)	_do_spin_unlock(lock)

 #endif /* CONFIG_DEBUG_SPINLOCK */
@@ -118,10 +118,10 @@ extern void __read_unlock(rwlock_t *);
 extern void __write_lock(rwlock_t *);
 extern void __write_unlock(rwlock_t *);

-#define read_lock(p)	__read_lock(p)
-#define read_unlock(p)	__read_unlock(p)
-#define write_lock(p)	__write_lock(p)
-#define write_unlock(p)	__write_unlock(p)
+#define _raw_read_lock(p)	__read_lock(p)
+#define _raw_read_unlock(p)	__read_unlock(p)
+#define _raw_write_lock(p)	__write_lock(p)
+#define _raw_write_unlock(p)	__write_unlock(p)

 #else /* !(CONFIG_DEBUG_SPINLOCK) */
@@ -138,28 +138,28 @@ extern void _do_read_unlock(rwlock_t *rw, char *str);
 extern void _do_write_lock(rwlock_t *rw, char *str);
 extern void _do_write_unlock(rwlock_t *rw);

-#define read_lock(lock) \
+#define _raw_read_lock(lock) \
 do {	unsigned long flags; \
 	__save_and_cli(flags); \
 	_do_read_lock(lock, "read_lock"); \
 	__restore_flags(flags); \
 } while(0)

-#define read_unlock(lock) \
+#define _raw_read_unlock(lock) \
 do {	unsigned long flags; \
 	__save_and_cli(flags); \
 	_do_read_unlock(lock, "read_unlock"); \
 	__restore_flags(flags); \
 } while(0)

-#define write_lock(lock) \
+#define _raw_write_lock(lock) \
 do {	unsigned long flags; \
 	__save_and_cli(flags); \
 	_do_write_lock(lock, "write_lock"); \
 	__restore_flags(flags); \
 } while(0)

-#define write_unlock(lock) \
+#define _raw_write_unlock(lock) \
 do {	unsigned long flags; \
 	__save_and_cli(flags); \
 	_do_write_unlock(lock); \
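
Renaming the arch primitives to _raw_* frees the unprefixed names for generic, preemption-aware wrappers; preempt-enabled trees of this era supply roughly the following in the generic layer (sketch for illustration, not part of this patch):

	/* Sketch of the generic wrappers the _raw_ renames make room for. */
	#define spin_lock(lock) \
	do { \
		preempt_disable(); \
		_raw_spin_lock(lock); \
	} while (0)

	#define spin_unlock(lock) \
	do { \
		_raw_spin_unlock(lock); \
		preempt_enable(); \
	} while (0)

	#define spin_trylock(lock) \
		({ preempt_disable(); \
		   _raw_spin_trylock(lock) ? 1 : \
			({ preempt_enable(); 0; }); })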
@@ -42,8 +42,10 @@ struct thread_info {
 	/* D$ line 2 */
 	unsigned long		fault_address;
 	struct pt_regs		*kregs;
-	unsigned long		*utraps;
 	struct exec_domain	*exec_domain;
+	int			preempt_count;
+	unsigned long		*utraps;

 	struct reg_window	reg_window[NSWINS];
 	unsigned long		rwbuf_stkptrs[NSWINS];
@@ -76,18 +78,19 @@ struct thread_info
 #define TI_KSP		0x00000018
 #define TI_FAULT_ADDR	0x00000020
 #define TI_KREGS	0x00000028
-#define TI_UTRAPS	0x00000030
-#define TI_EXEC_DOMAIN	0x00000038
-#define TI_REG_WINDOW	0x00000040
-#define TI_RWIN_SPTRS	0x000003c0
-#define TI_GSR		0x000003f8
-#define TI_XFSR		0x00000430
-#define TI_USER_CNTD0	0x00000468
-#define TI_USER_CNTD1	0x00000470
-#define TI_KERN_CNTD0	0x00000478
-#define TI_KERN_CNTD1	0x00000480
-#define TI_PCR		0x00000488
-#define TI_CEE_STUFF	0x00000490
+#define TI_EXEC_DOMAIN	0x00000030
+#define TI_PRE_COUNT	0x00000038
+#define TI_UTRAPS	0x00000040
+#define TI_REG_WINDOW	0x00000048
+#define TI_RWIN_SPTRS	0x000003c8
+#define TI_GSR		0x00000400
+#define TI_XFSR		0x00000438
+#define TI_USER_CNTD0	0x00000470
+#define TI_USER_CNTD1	0x00000478
+#define TI_KERN_CNTD0	0x00000480
+#define TI_KERN_CNTD1	0x00000488
+#define TI_PCR		0x00000490
+#define TI_CEE_STUFF	0x00000498
 #define TI_FPREGS	0x000004c0

 /* We embed this in the uppermost byte of thread_info->flags */
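
The shift is mechanical: preempt_count is an int, but with a pointer following it its slot still occupies 8 bytes, so every constant from TI_UTRAPS onward moves while TI_FPREGS keeps its 64-byte-aligned slot at 0x4c0. The trap_init() comparison against offsetof() shown earlier is what keeps these hand-written constants honest; a compile-time rendering of the same idea (BUILD_CHECK is a hypothetical helper, not patch code):

	/* Sketch: compile-time equivalent of the trap_init() offset check. */
	#include <stddef.h>
	#define BUILD_CHECK(expr) ((void)sizeof(char[1 - 2 * !(expr)]))

	static inline void thread_info_layout_check(void)
	{
		BUILD_CHECK(offsetof(struct thread_info, preempt_count) == 0x38);
		BUILD_CHECK(offsetof(struct thread_info, utraps) == 0x40);
		BUILD_CHECK((offsetof(struct thread_info, fpregs) & 63) == 0);
	}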
@@ -140,7 +140,7 @@
 	mov	level, %o0; \
 	call	routine; \
 	add	%sp, STACK_BIAS + REGWIN_SZ, %o1; \
-	ba,a,pt	%xcc, rtrap_clr_l6;
+	ba,a,pt	%xcc, rtrap_irq;

 #define TICK_SMP_IRQ \
 	rdpr	%pil, %g2; \
@@ -150,7 +150,7 @@
 109:	or	%g7, %lo(109b), %g7; \
 	call	smp_percpu_timer_interrupt; \
 	add	%sp, STACK_BIAS + REGWIN_SZ, %o0; \
-	ba,a,pt	%xcc, rtrap_clr_l6;
+	ba,a,pt	%xcc, rtrap_irq;

 #define TRAP_IVEC TRAP_NOSAVE(do_ivec)