Commit 22353f35 authored by Linus Torvalds

Merge branch 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus

* 'upstream' of git://ftp.linux-mips.org/pub/scm/upstream-linus:
  [MIPS] Fix smp barriers in test_and_{change,clear,set}_bit
  [MIPS] Fix IP27 build
  [MIPS] Fix modpost warnings by making start_secondary __cpuinit
  [MIPS] SMTC: Fix build error caused by nonsense code.
  [MIPS] SMTC: The MT ASE requires to initialize c0_pagemask and c0_wired.
  [MIPS] SMTC: Don't continue in set_vi_srs_handler on detected bad arguments.
  [MIPS] SMTC: Fix warning.
  [MIPS] Wire up utimensat, signalfd, timerfd, eventfd
  [MIPS] Atlas: Fix build.
  [MIPS] Always install the DSP exception handler.
  [MIPS] SMTC: Don't set and restore irqregs ptr from self_ipi.
  [MIPS] Fix KMODE for the R3000
parents 3e2ce4da ff72b7a6
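
The headline fix here is the barrier one: Linux requires that test_and_set_bit(), test_and_clear_bit() and test_and_change_bit() act as full memory barriers on SMP. A rough C11 analogue of that contract (illustrative only; the function and its names are made up here, not kernel code):

    #include <stdatomic.h>
    #include <stdbool.h>

    /* Sketch: an atomic read-modify-write that is also a full barrier.
     * The kernel versions below get the same effect from an ll/sc loop
     * followed by smp_mb() on every exit path. */
    static bool test_and_set_bit_like(atomic_ulong *word, unsigned bit)
    {
            unsigned long mask = 1UL << bit;

            return (atomic_fetch_or_explicit(word, mask,
                                             memory_order_seq_cst) & mask) != 0;
    }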
@@ -657,7 +657,11 @@ einval:	li	v0, -EINVAL
 	sys	sys_getcpu		3
 	sys	sys_epoll_pwait		6
 	sys	sys_ioprio_set		3
-	sys	sys_ioprio_get		2
+	sys	sys_ioprio_get		2	/* 4315 */
+	sys	sys_utimensat		4
+	sys	sys_signalfd		3
+	sys	sys_timerfd		4
+	sys	sys_eventfd		1
 	.endm
 
 	/* We pre-compute the number of _instruction_ bytes needed to
......
@@ -473,4 +473,8 @@ sys_call_table:
 	PTR	sys_epoll_pwait
 	PTR	sys_ioprio_set
 	PTR	sys_ioprio_get
+	PTR	sys_utimensat			/* 5275 */
+	PTR	sys_signalfd
+	PTR	sys_timerfd
+	PTR	sys_eventfd
 	.size	sys_call_table,.-sys_call_table
@@ -399,4 +399,8 @@ EXPORT(sysn32_call_table)
 	PTR	compat_sys_epoll_pwait
 	PTR	sys_ioprio_set
 	PTR	sys_ioprio_get
+	PTR	compat_sys_utimensat
+	PTR	compat_sys_signalfd		/* 5280 */
+	PTR	compat_sys_timerfd
+	PTR	sys_eventfd
 	.size	sysn32_call_table,.-sysn32_call_table
@@ -521,4 +521,8 @@ sys_call_table:
 	PTR	compat_sys_epoll_pwait
 	PTR	sys_ioprio_set
 	PTR	sys_ioprio_get		/* 4315 */
+	PTR	compat_sys_utimensat
+	PTR	compat_sys_signalfd
+	PTR	compat_sys_timerfd
+	PTR	sys_eventfd
 	.size	sys_call_table,.-sys_call_table
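
These four tables wire the same four calls into each MIPS ABI; the absolute numbers follow from the per-ABI bases set in the unistd.h hunks below (o32 starts at 4000, 64-bit at 5000, N32 at 6000), so o32 utimensat, for example, becomes 4316. A hypothetical userspace check of the new o32 entry (the file name and the use of raw syscall(2) are illustrative):

    #define _GNU_SOURCE
    #include <fcntl.h>      /* AT_FDCWD */
    #include <stdio.h>
    #include <unistd.h>

    int main(void)
    {
            /* 4000 + 316 == __NR_utimensat on o32; NULL times means
             * "set both timestamps to the current time". */
            long ret = syscall(4316, AT_FDCWD, "testfile", NULL, 0);

            if (ret < 0)
                    perror("utimensat");
            return ret < 0;
    }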
@@ -68,7 +68,7 @@ extern ATTRIB_NORET void cpu_idle(void);
  * First C code run on the secondary CPUs after being started up by
  * the master.
  */
-asmlinkage void start_secondary(void)
+asmlinkage __cpuinit void start_secondary(void)
 {
 	unsigned int cpu;
......
@@ -121,10 +121,7 @@ LEAF(self_ipi)
 	subu	t1,sp,PT_SIZE
 	sw	ra,PT_EPC(t1)
 	sw	a0,PT_PADSLOT4(t1)
-	LONG_L	s0, TI_REGS($28)
-	LONG_S	sp, TI_REGS($28)
 	la	t2,ipi_decode
-	LONG_S	s0, TI_REGS($28)
 	sw	t2,PT_PADSLOT5(t1)
 	/* Save pre-disable value of TCStatus */
 	sw	t0,PT_TCSTATUS(t1)
......
@@ -611,12 +611,12 @@ void smtc_cpus_done(void)
 int setup_irq_smtc(unsigned int irq, struct irqaction * new,
 			unsigned long hwmask)
 {
-#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 	unsigned int vpe = current_cpu_data.vpe_id;
 
+	irq_hwmask[irq] = hwmask;
+#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
 	vpemask[vpe][irq - MIPSCPU_INT_BASE] = 1;
 #endif
-	irq_hwmask[irq] = hwmask;
 
 	return setup_irq(irq, new);
 }
......
@@ -11,6 +11,7 @@
  * Copyright (C) 2000, 01 MIPS Technologies, Inc.
  * Copyright (C) 2002, 2003, 2004, 2005 Maciej W. Rozycki
  */
+#include <linux/bug.h>
 #include <linux/init.h>
 #include <linux/mm.h>
 #include <linux/module.h>
@@ -1190,8 +1191,8 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
 		memcpy (b, &except_vec_vi, handler_len);
 #ifdef CONFIG_MIPS_MT_SMTC
-		if (n > 7)
-			printk("Vector index %d exceeds SMTC maximum\n", n);
+		BUG_ON(n > 7);	/* Vector index %d exceeds SMTC maximum. */
+
 		w = (u32 *)(b + mori_offset);
 		*w = (*w & 0xffff0000) | (0x100 << n);
 #endif /* CONFIG_MIPS_MT_SMTC */
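
The set_vi_srs_handler() change turns log-and-continue into a hard stop: previously a vector index above 7 was printed but the code went on to patch the handler with the out-of-range index anyway. In plain C the control-flow difference looks like this (assert() standing in for the kernel's BUG_ON(); names illustrative):

    #include <assert.h>
    #include <stdio.h>

    static void install_vector(int n)
    {
            /* Before the fix: printf(...) when n > 7, then fall through
             * and patch with the bad index regardless.
             * After: refuse to continue at all. */
            assert(n <= 7);    /* userspace stand-in for BUG_ON(n > 7) */

            printf("patching vector %d\n", n);
    }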
@@ -1383,6 +1384,13 @@ void __init per_cpu_trap_init(void)
 		cpu_cache_init();
 		tlb_init();
 #ifdef CONFIG_MIPS_MT_SMTC
+	} else if (!secondaryTC) {
+		/*
+		 * First TC in non-boot VPE must do subset of tlb_init()
+		 * for MMU countrol registers.
+		 */
+		write_c0_pagemask(PM_DEFAULT_MASK);
+		write_c0_wired(0);
 	}
 #endif /* CONFIG_MIPS_MT_SMTC */
 }
@@ -1531,8 +1539,7 @@ void __init trap_init(void)
 	if (cpu_has_mipsmt)
 		set_except_vector(25, handle_mt);
 
-	if (cpu_has_dsp)
-		set_except_vector(26, handle_dsp);
+	set_except_vector(26, handle_dsp);
 
 	if (cpu_has_vce)
 		/* Special exception: R4[04]00 uses also the divec space. */
......
@@ -248,14 +248,13 @@ void __init arch_init_irq(void)
 	case MIPS_REVISION_CORID_CORE_24K:
 	case MIPS_REVISION_CORID_CORE_EMUL_MSC:
 		if (cpu_has_veic)
-			init_msc_irqs (MSC01E_INT_BASE,
+			init_msc_irqs (MSC01E_INT_BASE, MSC01E_INT_BASE,
 					msc_eicirqmap, msc_nr_eicirqs);
 		else
-			init_msc_irqs (MSC01C_INT_BASE,
+			init_msc_irqs (MSC01E_INT_BASE, MSC01C_INT_BASE,
 					msc_irqmap, msc_nr_irqs);
 	}
 
 	if (cpu_has_veic) {
 		set_vi_handler (MSC01E_INT_ATLAS, atlas_hw0_irqdispatch);
 		setup_irq (MSC01E_INT_BASE + MSC01E_INT_ATLAS, &atlasirq);
......
@@ -88,8 +88,6 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 	 * the general MIPS timer_interrupt routine.
 	 */
-	int vpflags;
-
 	/*
 	 * We could be here due to timer interrupt,
 	 * perf counter overflow, or both.
@@ -98,15 +96,6 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 		perf_irq();
 
 	if (read_c0_cause() & (1 << 30)) {
-		/* If timer interrupt, make it de-assert */
-		write_c0_compare (read_c0_count() - 1);
-		/*
-		 * DVPE is necessary so long as cross-VPE interrupts
-		 * are done via read-modify-write of Cause register.
-		 */
-		vpflags = dvpe();
-		clear_c0_cause(CPUCTR_IMASKBIT);
-		evpe(vpflags);
 		/*
 		 * There are things we only want to do once per tick
 		 * in an "MP" system. One TC of each VPE will take
@@ -115,14 +104,13 @@ irqreturn_t mips_timer_interrupt(int irq, void *dev_id)
 		 * the tick on VPE 0 to run the full timer_interrupt().
 		 */
 		if (cpu_data[cpu].vpe_id == 0) {
-			timer_interrupt(irq, NULL);
-			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
+			timer_interrupt(irq, NULL);
 		} else {
 			write_c0_compare(read_c0_count() +
 			                 (mips_hpt_frequency/HZ));
 			local_timer_interrupt(irq, dev_id);
-			smtc_timer_broadcast(cpu_data[cpu].vpe_id);
 		}
+		smtc_timer_broadcast(cpu_data[cpu].vpe_id);
 	}
 #else /* CONFIG_MIPS_MT_SMTC */
 	int r2 = cpu_has_mips_r2;
......
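
Net effect of the three time.c hunks: the de-assert/DVPE fiddling that the shortlog calls nonsense code (and that broke the build) is gone, and the cross-VPE broadcast is hoisted out of both branches so every tick-taking TC issues exactly one smtc_timer_broadcast(). The resulting flow, transcribed as C (kernel symbols assumed, not a drop-in):

    if (cpu_data[cpu].vpe_id == 0) {
            timer_interrupt(irq, NULL);             /* full tick, VPE 0 only */
    } else {
            write_c0_compare(read_c0_count() +
                             (mips_hpt_frequency / HZ));
            local_timer_interrupt(irq, dev_id);     /* local accounting only */
    }
    smtc_timer_broadcast(cpu_data[cpu].vpe_id);     /* now once, unconditionally */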
@@ -517,7 +517,7 @@ void __init paging_init(void)
 		pfn_t start_pfn = slot_getbasepfn(node, 0);
 		pfn_t end_pfn = node_getmaxclick(node) + 1;
 
-		zones_size[ZONE_DMA] = end_pfn - start_pfn;
+		zones_size[ZONE_NORMAL] = end_pfn - start_pfn;
 		free_area_init_node(node, NODE_DATA(node),
 				zones_size, start_pfn, NULL);
......
@@ -238,10 +238,11 @@ static inline int test_and_set_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
 	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long res;
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-		unsigned long temp, res;
+		unsigned long temp;
 
 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -254,11 +255,9 @@ static inline int test_and_set_bit(unsigned long nr,
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << bit), "m" (*m)
 		: "memory");
-
-		return res != 0;
 	} else if (cpu_has_llsc) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-		unsigned long temp, res;
+		unsigned long temp;
 
 		__asm__ __volatile__(
 		"	.set	push					\n"
@@ -277,25 +276,22 @@ static inline int test_and_set_bit(unsigned long nr,
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << bit), "m" (*m)
 		: "memory");
-
-		return res != 0;
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
-		int retval;
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
 		raw_local_irq_save(flags);
-		retval = (mask & *a) != 0;
+		res = (mask & *a);
 		*a |= mask;
 		raw_local_irq_restore(flags);
-
-		return retval;
 	}
+
+	smp_mb();
+
+	return res != 0;
 }
/*
@@ -310,6 +306,7 @@ static inline int test_and_clear_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
 	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long res;
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
@@ -327,12 +324,10 @@ static inline int test_and_clear_bit(unsigned long nr,
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << bit), "m" (*m)
 		: "memory");
-
-		return res != 0;
 #ifdef CONFIG_CPU_MIPSR2
 	} else if (__builtin_constant_p(nr)) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-		unsigned long temp, res;
+		unsigned long temp;
 
 		__asm__ __volatile__(
 		"1:	" __LL	"%0, %1 # test_and_clear_bit	\n"
@@ -346,12 +341,10 @@ static inline int test_and_clear_bit(unsigned long nr,
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "ri" (bit), "m" (*m)
 		: "memory");
-
-		return res;
 #endif
 	} else if (cpu_has_llsc) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-		unsigned long temp, res;
+		unsigned long temp;
 
 		__asm__ __volatile__(
 		"	.set	push					\n"
@@ -371,25 +364,22 @@ static inline int test_and_clear_bit(unsigned long nr,
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << bit), "m" (*m)
 		: "memory");
-
-		return res != 0;
 	} else {
 		volatile unsigned long *a = addr;
 		unsigned long mask;
-		int retval;
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
 		raw_local_irq_save(flags);
-		retval = (mask & *a) != 0;
+		res = (mask & *a);
 		*a &= ~mask;
 		raw_local_irq_restore(flags);
-
-		return retval;
 	}
+
+	smp_mb();
+
+	return res != 0;
 }
/*
@@ -404,10 +394,11 @@ static inline int test_and_change_bit(unsigned long nr,
 	volatile unsigned long *addr)
 {
 	unsigned short bit = nr & SZLONG_MASK;
+	unsigned long res;
 
 	if (cpu_has_llsc && R10000_LLSC_WAR) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-		unsigned long temp, res;
+		unsigned long temp;
 
 		__asm__ __volatile__(
 		"	.set	mips3					\n"
@@ -420,11 +411,9 @@ static inline int test_and_change_bit(unsigned long nr,
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << bit), "m" (*m)
 		: "memory");
-
-		return res != 0;
 	} else if (cpu_has_llsc) {
 		unsigned long *m = ((unsigned long *) addr) + (nr >> SZLONG_LOG);
-		unsigned long temp, res;
+		unsigned long temp;
 
 		__asm__ __volatile__(
 		"	.set	push					\n"
@@ -443,24 +432,22 @@ static inline int test_and_change_bit(unsigned long nr,
 		: "=&r" (temp), "=m" (*m), "=&r" (res)
 		: "r" (1UL << bit), "m" (*m)
 		: "memory");
-
-		return res != 0;
 	} else {
 		volatile unsigned long *a = addr;
-		unsigned long mask, retval;
+		unsigned long mask;
 		unsigned long flags;
 
 		a += nr >> SZLONG_LOG;
 		mask = 1UL << bit;
 		raw_local_irq_save(flags);
-		retval = (mask & *a) != 0;
+		res = (mask & *a);
 		*a ^= mask;
 		raw_local_irq_restore(flags);
-
-		return retval;
 	}
+
+	smp_mb();
+
+	return res != 0;
 }
 
 #include <asm-generic/bitops/non-atomic.h>
......
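
The bitops.h rework follows one pattern in all three functions: hoist res out of the branches, drop the early returns, and funnel every path through a single exit that issues smp_mb(), so the SMP barrier holds no matter which implementation (R10000 ll/sc, MIPS R2, generic ll/sc, or the IRQ-disable fallback) was taken. The fallback path of test_and_set_bit() after the change, transcribed into standalone form (SZLONG_LOG/SZLONG_MASK, raw_local_irq_save/restore and smp_mb() are the kernel's own helpers, assumed in scope):

    static inline int test_and_set_bit_fallback(unsigned long nr,
            volatile unsigned long *addr)
    {
            unsigned short bit = nr & SZLONG_MASK;
            volatile unsigned long *a = addr + (nr >> SZLONG_LOG);
            unsigned long mask = 1UL << bit;
            unsigned long res, flags;

            raw_local_irq_save(flags);
            res = mask & *a;        /* nonzero iff the bit was already set */
            *a |= mask;
            raw_local_irq_restore(flags);

            smp_mb();               /* the barrier the old early returns skipped */

            return res != 0;
    }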
@@ -17,6 +17,18 @@
 #include <asm/mipsregs.h>
 #include <asm/asm-offsets.h>
 
+/*
+ * For SMTC kernel, global IE should be left set, and interrupts
+ * controlled exclusively via IXMT.
+ */
+#ifdef CONFIG_MIPS_MT_SMTC
+#define STATMASK 0x1e
+#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#define STATMASK 0x3f
+#else
+#define STATMASK 0x1f
+#endif
+
 #ifdef CONFIG_MIPS_MT_SMTC
 #include <asm/mipsmtregs.h>
 #endif /* CONFIG_MIPS_MT_SMTC */
@@ -236,10 +248,10 @@
 		.set	reorder
 		.set	noat
 		mfc0	a0, CP0_STATUS
-		ori	a0, 0x1f
-		xori	a0, 0x1f
-		mtc0	a0, CP0_STATUS
 		li	v1, 0xff00
+		ori	a0, STATMASK
+		xori	a0, STATMASK
+		mtc0	a0, CP0_STATUS
 		and	a0, v1
 		LONG_L	v0, PT_STATUS(sp)
 		nor	v1, $0, v1
@@ -249,10 +261,6 @@
 		LONG_L	$31, PT_R31(sp)
 		LONG_L	$28, PT_R28(sp)
 		LONG_L	$25, PT_R25(sp)
-#ifdef CONFIG_64BIT
-		LONG_L	$8, PT_R8(sp)
-		LONG_L	$9, PT_R9(sp)
-#endif
 		LONG_L	$7, PT_R7(sp)
 		LONG_L	$6, PT_R6(sp)
 		LONG_L	$5, PT_R5(sp)
@@ -273,16 +281,6 @@
 	.endm
 
 #else
-/*
- * For SMTC kernel, global IE should be left set, and interrupts
- * controlled exclusively via IXMT.
- */
-#ifdef CONFIG_MIPS_MT_SMTC
-#define STATMASK 0x1e
-#else
-#define STATMASK 0x1f
-#endif
-
 	.macro	RESTORE_SOME
 		.set	push
 		.set	reorder
@@ -385,9 +383,9 @@
 	.macro	CLI
 #if !defined(CONFIG_MIPS_MT_SMTC)
 		mfc0	t0, CP0_STATUS
-		li	t1, ST0_CU0 | 0x1f
+		li	t1, ST0_CU0 | STATMASK
 		or	t0, t1
-		xori	t0, 0x1f
+		xori	t0, STATMASK
 		mtc0	t0, CP0_STATUS
 #else /* CONFIG_MIPS_MT_SMTC */
 		/*
@@ -420,9 +418,9 @@
 	.macro	STI
 #if !defined(CONFIG_MIPS_MT_SMTC)
 		mfc0	t0, CP0_STATUS
-		li	t1, ST0_CU0 | 0x1f
+		li	t1, ST0_CU0 | STATMASK
 		or	t0, t1
-		xori	t0, 0x1e
+		xori	t0, STATMASK & ~1
 		mtc0	t0, CP0_STATUS
 #else /* CONFIG_MIPS_MT_SMTC */
 		/*
@@ -451,7 +449,8 @@
 	.endm
 
 /*
- * Just move to kernel mode and leave interrupts as they are.
+ * Just move to kernel mode and leave interrupts as they are.  Note
+ * for the R3000 this means copying the previous enable from IEp.
  * Set cp0 enable bit as sign that we're running on the kernel stack
  */
 	.macro	KMODE
@@ -482,9 +481,14 @@
 		move	ra, t0
 #endif /* CONFIG_MIPS_MT_SMTC */
 		mfc0	t0, CP0_STATUS
-		li	t1, ST0_CU0 | 0x1e
+		li	t1, ST0_CU0 | (STATMASK & ~1)
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+		andi	t2, t0, ST0_IEP
+		srl	t2, 2
+		or	t0, t2
+#endif
 		or	t0, t1
-		xori	t0, 0x1e
+		xori	t0, STATMASK & ~1
 		mtc0	t0, CP0_STATUS
 #ifdef CONFIG_MIPS_MT_SMTC
 		_ehb
......
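
STATMASK now varies by CPU family because the Status-register layout does: R4000-class cores keep IE/EXL/ERL/KSU in bits 4:0 (0x1f), SMTC leaves global IE alone and masks through IXMT instead (0x1e), and the R3000/TX39 use the six-bit KUo/IEo/KUp/IEp/KUc/IEc stack (0x3f). The KMODE hunk's R3000 special case copies the previous interrupt enable down into the current slot; as integer arithmetic (constants per the R3000 Status layout; the function name is illustrative):

    #define ST0_IEP       0x04              /* previous IE, bit 2 (R3000) */
    #define STATMASK_R3K  0x3f              /* KUo/IEo/KUp/IEp/KUc/IEc */

    static unsigned int kmode_status_r3000(unsigned int st)
    {
            st |= (st & ST0_IEP) >> 2;      /* andi/srl/or: IEp -> IEc */
            st |= STATMASK_R3K & ~1;        /* or  t0, t1 (ST0_CU0 omitted here) */
            st ^= STATMASK_R3K & ~1;        /* xori: clear the KU/IE stack above IEc */
            return st;
    }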
@@ -336,16 +336,20 @@
 #define __NR_epoll_pwait		(__NR_Linux + 313)
 #define __NR_ioprio_set			(__NR_Linux + 314)
 #define __NR_ioprio_get			(__NR_Linux + 315)
+#define __NR_utimensat			(__NR_Linux + 316)
+#define __NR_signalfd			(__NR_Linux + 317)
+#define __NR_timerfd			(__NR_Linux + 318)
+#define __NR_eventfd			(__NR_Linux + 319)
 
 /*
  * Offset of the last Linux o32 flavoured syscall
 */
-#define __NR_Linux_syscalls		315
+#define __NR_Linux_syscalls		319
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
 
 #define __NR_O32_Linux			4000
-#define __NR_O32_Linux_syscalls		315
+#define __NR_O32_Linux_syscalls		319
 
 #if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -628,16 +632,20 @@
 #define __NR_epoll_pwait		(__NR_Linux + 272)
 #define __NR_ioprio_set			(__NR_Linux + 273)
 #define __NR_ioprio_get			(__NR_Linux + 274)
+#define __NR_utimensat			(__NR_Linux + 275)
+#define __NR_signalfd			(__NR_Linux + 276)
+#define __NR_timerfd			(__NR_Linux + 277)
+#define __NR_eventfd			(__NR_Linux + 278)
 
 /*
  * Offset of the last Linux 64-bit flavoured syscall
 */
-#define __NR_Linux_syscalls		274
+#define __NR_Linux_syscalls		278
 
 #endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
 
 #define __NR_64_Linux			5000
-#define __NR_64_Linux_syscalls		274
+#define __NR_64_Linux_syscalls		278
 
 #if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -924,16 +932,20 @@
 #define __NR_epoll_pwait		(__NR_Linux + 276)
 #define __NR_ioprio_set			(__NR_Linux + 277)
 #define __NR_ioprio_get			(__NR_Linux + 278)
+#define __NR_utimensat			(__NR_Linux + 279)
+#define __NR_signalfd			(__NR_Linux + 280)
+#define __NR_timerfd			(__NR_Linux + 281)
+#define __NR_eventfd			(__NR_Linux + 282)
 
 /*
  * Offset of the last N32 flavoured syscall
 */
-#define __NR_Linux_syscalls		278
+#define __NR_Linux_syscalls		282
 
 #endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
 
 #define __NR_N32_Linux			6000
-#define __NR_N32_Linux_syscalls		278
+#define __NR_N32_Linux_syscalls		282
 
 #ifdef __KERNEL__
......