Commit b633648c authored by Ralf Baechle

MIPS: MT: Remove SMTC support

Nobody is maintaining SMTC anymore and there also seems to be no user base.
That is a pity: the SMTC technology, primarily developed by Kevin D.
Kissell <kevink@paralogos.com>, is an ingenious demonstration of the MT
ASE's power and elegance.

Based on Markos Chandras's <Markos.Chandras@imgtec.com> patch
https://patchwork.linux-mips.org/patch/6719/, which, while very similar, no
longer applied cleanly when I tried to merge it, plus some additional
post-SMTC cleanup. SMTC was a feature as tricky to remove as it once was to
merge.
Signed-off-by: Ralf Baechle <ralf@linux-mips.org>
parent 8b2e62cc
@@ -1852,7 +1852,7 @@ config FORCE_MAX_ZONEORDER
config CEVT_GIC
bool "Use GIC global counter for clock events"
- depends on IRQ_GIC && !(MIPS_SEAD3 || MIPS_MT_SMTC)
+ depends on IRQ_GIC && !MIPS_SEAD3
help
Use the GIC global counter for the clock events. The R4K clock
event driver is always present, so if the platform ends up not
@@ -1936,24 +1936,6 @@ config MIPS_MT_SMP
Intel Hyperthreading feature. For further information go to
<http://www.imgtec.com/mips/mips-multithreading.asp>.
config MIPS_MT_SMTC
bool "Use all TCs on all VPEs for SMP (DEPRECATED)"
depends on CPU_MIPS32_R2
depends on SYS_SUPPORTS_MULTITHREADING
depends on !MIPS_CPS
select CPU_MIPSR2_IRQ_VI
select CPU_MIPSR2_IRQ_EI
select MIPS_MT
select SMP
select SMP_UP
select SYS_SUPPORTS_SMP
select NR_CPUS_DEFAULT_8
help
This is a kernel model which is known as SMTC. This is
supported on cores with the MT ASE and presents all TCs
available on all VPEs to support SMP. For further
information see <http://www.linux-mips.org/wiki/34K#SMTC>.
endchoice
config MIPS_MT
@@ -1977,7 +1959,7 @@ config SYS_SUPPORTS_MULTITHREADING
config MIPS_MT_FPAFF
bool "Dynamic FPU affinity for FP-intensive threads"
default y
- depends on MIPS_MT_SMP || MIPS_MT_SMTC
+ depends on MIPS_MT_SMP
config MIPS_VPE_LOADER
bool "VPE loader support."
@@ -1999,29 +1981,6 @@ config MIPS_VPE_LOADER_MT
default "y"
depends on MIPS_VPE_LOADER && !MIPS_CMP
config MIPS_MT_SMTC_IM_BACKSTOP
bool "Use per-TC register bits as backstop for inhibited IM bits"
depends on MIPS_MT_SMTC
default n
help
To support multiple TC microthreads acting as "CPUs" within
a VPE, VPE-wide interrupt mask bits must be specially manipulated
during interrupt handling. To support legacy drivers and interrupt
controller management code, SMTC has a "backstop" to track and
if necessary restore the interrupt mask. This has some performance
impact on interrupt service overhead.
config MIPS_MT_SMTC_IRQAFF
bool "Support IRQ affinity API"
depends on MIPS_MT_SMTC
default n
help
Enables SMP IRQ affinity API (/proc/irq/*/smp_affinity, etc.)
for SMTC Linux kernel. Requires platform support, of which
an example can be found in the MIPS kernel i8259 and Malta
platform code. Adds some overhead to interrupt dispatch, and
should be used only if you know what you are doing.
config MIPS_VPE_LOADER_TOM
bool "Load VPE program into memory hidden from linux"
depends on MIPS_VPE_LOADER
@@ -2049,7 +2008,7 @@ config MIPS_VPE_APSP_API_MT
config MIPS_CMP
bool "MIPS CMP framework support (DEPRECATED)"
- depends on SYS_SUPPORTS_MIPS_CMP && !MIPS_MT_SMTC
+ depends on SYS_SUPPORTS_MIPS_CMP
select MIPS_GIC_IPI
select SYNC_R4K
select WEAK_ORDERING
@@ -2256,7 +2215,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && !MIPS_MT_SMTC && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
+ depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP)
default y
help
Enable hardware performance counter support for perf events. If
......
@@ -79,15 +79,6 @@ config CMDLINE_OVERRIDE
Normally, you will choose 'N' here.
config SMTC_IDLE_HOOK_DEBUG
bool "Enable additional debug checks before going into CPU idle loop"
depends on DEBUG_KERNEL && MIPS_MT_SMTC
help
This option enables Enable additional debug checks before going into
CPU idle loop. For details on these checks, see
arch/mips/kernel/smtc.c. This debugging option result in significant
overhead so should be disabled in production kernels.
config SB1XXX_CORELIS
bool "Corelis Debugger"
depends on SIBYTE_SB1xxx_SOC
......
CONFIG_MIPS_MALTA=y
CONFIG_CPU_LITTLE_ENDIAN=y
CONFIG_CPU_MIPS32_R2=y
CONFIG_PAGE_SIZE_16KB=y
CONFIG_MIPS_MT_SMTC=y
# CONFIG_MIPS_MT_FPAFF is not set
CONFIG_NR_CPUS=9
CONFIG_HZ_48=y
CONFIG_LOCALVERSION="smtc"
CONFIG_SYSVIPC=y
CONFIG_POSIX_MQUEUE=y
CONFIG_AUDIT=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=15
CONFIG_SYSCTL_SYSCALL=y
CONFIG_EMBEDDED=y
CONFIG_SLAB=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
# CONFIG_BLK_DEV_BSG is not set
CONFIG_PCI=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_XFRM_USER=m
CONFIG_NET_KEY=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
CONFIG_IP_ADVANCED_ROUTER=y
CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_MULTIPATH=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
CONFIG_IP_PNP_BOOTP=y
CONFIG_NET_IPIP=m
CONFIG_IP_MROUTE=y
CONFIG_IP_PIMSM_V1=y
CONFIG_IP_PIMSM_V2=y
CONFIG_SYN_COOKIES=y
CONFIG_INET_AH=m
CONFIG_INET_ESP=m
CONFIG_INET_IPCOMP=m
# CONFIG_INET_LRO is not set
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
CONFIG_IPV6_TUNNEL=m
CONFIG_BRIDGE=m
CONFIG_VLAN_8021Q=m
CONFIG_ATALK=m
CONFIG_DEV_APPLETALK=m
CONFIG_IPDDP=m
CONFIG_IPDDP_ENCAP=y
CONFIG_NET_SCHED=y
CONFIG_NET_SCH_CBQ=m
CONFIG_NET_SCH_HTB=m
CONFIG_NET_SCH_HFSC=m
CONFIG_NET_SCH_PRIO=m
CONFIG_NET_SCH_RED=m
CONFIG_NET_SCH_SFQ=m
CONFIG_NET_SCH_TEQL=m
CONFIG_NET_SCH_TBF=m
CONFIG_NET_SCH_GRED=m
CONFIG_NET_SCH_DSMARK=m
CONFIG_NET_SCH_NETEM=m
CONFIG_NET_SCH_INGRESS=m
CONFIG_NET_CLS_BASIC=m
CONFIG_NET_CLS_TCINDEX=m
CONFIG_NET_CLS_ROUTE4=m
CONFIG_NET_CLS_FW=m
CONFIG_NET_CLS_U32=m
CONFIG_NET_CLS_RSVP=m
CONFIG_NET_CLS_RSVP6=m
CONFIG_NET_CLS_ACT=y
CONFIG_NET_ACT_POLICE=y
CONFIG_NET_CLS_IND=y
# CONFIG_WIRELESS is not set
CONFIG_DEVTMPFS=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_CRYPTOLOOP=m
CONFIG_IDE=y
# CONFIG_IDE_PROC_FS is not set
# CONFIG_IDEPCI_PCIBUS_ORDER is not set
CONFIG_BLK_DEV_GENERIC=y
CONFIG_BLK_DEV_PIIX=y
CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_3COM is not set
# CONFIG_NET_VENDOR_ADAPTEC is not set
# CONFIG_NET_VENDOR_ALTEON is not set
CONFIG_PCNET32=y
# CONFIG_NET_VENDOR_ATHEROS is not set
# CONFIG_NET_VENDOR_BROADCOM is not set
# CONFIG_NET_VENDOR_BROCADE is not set
# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_CISCO is not set
# CONFIG_NET_VENDOR_DEC is not set
# CONFIG_NET_VENDOR_DLINK is not set
# CONFIG_NET_VENDOR_EMULEX is not set
# CONFIG_NET_VENDOR_EXAR is not set
# CONFIG_NET_VENDOR_HP is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MELLANOX is not set
# CONFIG_NET_VENDOR_MICREL is not set
# CONFIG_NET_VENDOR_MYRI is not set
# CONFIG_NET_VENDOR_NATSEMI is not set
# CONFIG_NET_VENDOR_NVIDIA is not set
# CONFIG_NET_VENDOR_OKI is not set
# CONFIG_NET_PACKET_ENGINE is not set
# CONFIG_NET_VENDOR_QLOGIC is not set
# CONFIG_NET_VENDOR_REALTEK is not set
# CONFIG_NET_VENDOR_RDC is not set
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SILAN is not set
# CONFIG_NET_VENDOR_SIS is not set
# CONFIG_NET_VENDOR_SMSC is not set
# CONFIG_NET_VENDOR_STMICRO is not set
# CONFIG_NET_VENDOR_SUN is not set
# CONFIG_NET_VENDOR_TEHUTI is not set
# CONFIG_NET_VENDOR_TI is not set
# CONFIG_NET_VENDOR_TOSHIBA is not set
# CONFIG_NET_VENDOR_VIA is not set
# CONFIG_WLAN is not set
# CONFIG_VT is not set
CONFIG_LEGACY_PTY_COUNT=16
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
CONFIG_HW_RANDOM=y
# CONFIG_HWMON is not set
CONFIG_VIDEO_OUTPUT_CONTROL=m
CONFIG_FB=y
CONFIG_FIRMWARE_EDID=y
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX_G=y
CONFIG_USB=y
CONFIG_USB_EHCI_HCD=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
CONFIG_USB_UHCI_HCD=y
CONFIG_USB_STORAGE=y
CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
CONFIG_LEDS_TRIGGER_IDE_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_CMOS=y
CONFIG_EXT2_FS=y
CONFIG_EXT3_FS=y
# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set
CONFIG_XFS_FS=y
CONFIG_XFS_QUOTA=y
CONFIG_XFS_POSIX_ACL=y
CONFIG_QUOTA=y
CONFIG_QFMT_V2=y
CONFIG_MSDOS_FS=m
CONFIG_VFAT_FS=m
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_CIFS=m
CONFIG_CIFS_WEAK_PW_HASH=y
CONFIG_CIFS_XATTR=y
CONFIG_CIFS_POSIX=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
# CONFIG_FTRACE is not set
CONFIG_CRYPTO_NULL=m
CONFIG_CRYPTO_PCBC=m
CONFIG_CRYPTO_HMAC=y
CONFIG_CRYPTO_MICHAEL_MIC=m
CONFIG_CRYPTO_SHA512=m
CONFIG_CRYPTO_TGR192=m
CONFIG_CRYPTO_WP512=m
CONFIG_CRYPTO_ANUBIS=m
CONFIG_CRYPTO_BLOWFISH=m
CONFIG_CRYPTO_CAST5=m
CONFIG_CRYPTO_CAST6=m
CONFIG_CRYPTO_KHAZAD=m
CONFIG_CRYPTO_SERPENT=m
CONFIG_CRYPTO_TEA=m
CONFIG_CRYPTO_TWOFISH=m
# CONFIG_CRYPTO_ANSI_CPRNG is not set
# CONFIG_CRYPTO_HW is not set
@@ -17,26 +17,8 @@
#ifdef CONFIG_64BIT
#include <asm/asmmacro-64.h>
#endif
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif
#ifdef CONFIG_MIPS_MT_SMTC
.macro local_irq_enable reg=t0
mfc0 \reg, CP0_TCSTATUS
ori \reg, \reg, TCSTATUS_IXMT
xori \reg, \reg, TCSTATUS_IXMT
mtc0 \reg, CP0_TCSTATUS
_ehb
.endm
- .macro local_irq_disable reg=t0
+ #ifdef CONFIG_CPU_MIPSR2
mfc0 \reg, CP0_TCSTATUS
ori \reg, \reg, TCSTATUS_IXMT
mtc0 \reg, CP0_TCSTATUS
_ehb
.endm
#elif defined(CONFIG_CPU_MIPSR2)
.macro local_irq_enable reg=t0
ei
irq_enable_hazard
@@ -71,7 +53,7 @@
sw \reg, TI_PRE_COUNT($28)
#endif
.endm
- #endif /* CONFIG_MIPS_MT_SMTC */
+ #endif /* CONFIG_CPU_MIPSR2 */
.macro fpu_save_16even thread tmp=t0
cfc1 \tmp, fcr31
......
@@ -65,17 +65,12 @@ struct cpuinfo_mips {
#ifdef CONFIG_64BIT
int vmbits; /* Virtual memory size in bits */
#endif
- #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+ #ifdef CONFIG_MIPS_MT_SMP
/*
- * In the MIPS MT "SMTC" model, each TC is considered
- * to be a "CPU" for the purposes of scheduling, but
- * exception resources, ASID spaces, etc, are common
- * to all TCs within the same VPE.
+ * There is not necessarily a 1:1 mapping of VPE num to CPU number
+ * in particular on multi-core systems.
*/
int vpe_id; /* Virtual Processor number */
#endif
#ifdef CONFIG_MIPS_MT_SMTC
int tc_id; /* Thread Context number */
#endif
void *data; /* Additional data */
unsigned int watch_reg_count; /* Number that exist */
@@ -117,7 +112,7 @@ struct proc_cpuinfo_notifier_args {
unsigned long n;
};
- #if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC)
+ #ifdef CONFIG_MIPS_MT_SMP
# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id)
#else
# define cpu_vpe_id(cpuinfo) 0
......
@@ -48,11 +48,7 @@
enum fixed_addresses {
#define FIX_N_COLOURS 8
FIX_CMAP_BEGIN,
#ifdef CONFIG_MIPS_MT_SMTC
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * NR_CPUS * 2),
#else
FIX_CMAP_END = FIX_CMAP_BEGIN + (FIX_N_COLOURS * 2),
#endif
#ifdef CONFIG_HIGHMEM
/* reserved pte's for temporary kernel mappings */
FIX_KMAP_BEGIN = FIX_CMAP_END + 1,
......
@@ -26,104 +26,8 @@ static inline int irq_canonicalize(int irq)
#define irq_canonicalize(irq) (irq) /* Sane hardware, sane code ... */
#endif
#ifdef CONFIG_MIPS_MT_SMTC
struct irqaction;
extern unsigned long irq_hwmask[];
extern int setup_irq_smtc(unsigned int irq, struct irqaction * new,
unsigned long hwmask);
static inline void smtc_im_ack_irq(unsigned int irq)
{
if (irq_hwmask[irq] & ST0_IM)
set_c0_status(irq_hwmask[irq] & ST0_IM);
}
#else
static inline void smtc_im_ack_irq(unsigned int irq)
{
}
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
#include <linux/cpumask.h>
extern int plat_set_irq_affinity(struct irq_data *d,
const struct cpumask *affinity, bool force);
extern void smtc_forward_irq(struct irq_data *d);
/*
* IRQ affinity hook invoked at the beginning of interrupt dispatch
* if option is enabled.
*
* Up through Linux 2.6.22 (at least) cpumask operations are very
* inefficient on MIPS. Initial prototypes of SMTC IRQ affinity
* used a "fast path" per-IRQ-descriptor cache of affinity information
* to reduce latency. As there is a project afoot to optimize the
* cpumask implementations, this version is optimistically assuming
* that cpumask.h macro overhead is reasonable during interrupt dispatch.
*/
static inline int handle_on_other_cpu(unsigned int irq)
{
struct irq_data *d = irq_get_irq_data(irq);
if (cpumask_test_cpu(smp_processor_id(), d->affinity))
return 0;
smtc_forward_irq(d);
return 1;
}
#else /* Not doing SMTC affinity */
static inline int handle_on_other_cpu(unsigned int irq) { return 0; }
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
static inline void smtc_im_backstop(unsigned int irq)
{
if (irq_hwmask[irq] & 0x0000ff00)
write_c0_tccontext(read_c0_tccontext() &
~(irq_hwmask[irq] & 0x0000ff00));
}
/*
* Clear interrupt mask handling "backstop" if irq_hwmask
* entry so indicates. This implies that the ack() or end()
* functions will take over re-enabling the low-level mask.
* Otherwise it will be done on return from exception.
*/
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
int ret = handle_on_other_cpu(irq);
if (!ret)
smtc_im_backstop(irq);
return ret;
}
#else
static inline void smtc_im_backstop(unsigned int irq) { }
static inline int smtc_handle_on_other_cpu(unsigned int irq)
{
return handle_on_other_cpu(irq);
}
#endif
extern void do_IRQ(unsigned int irq);
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
extern void do_IRQ_no_affinity(unsigned int irq);
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
extern void arch_init_irq(void);
extern void spurious_interrupt(void);
......
@@ -17,7 +17,7 @@
#include <linux/stringify.h>
#include <asm/hazards.h>
- #if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC)
+ #ifdef CONFIG_CPU_MIPSR2
static inline void arch_local_irq_disable(void)
{
@@ -118,30 +118,15 @@ void arch_local_irq_disable(void);
unsigned long arch_local_irq_save(void);
void arch_local_irq_restore(unsigned long flags);
void __arch_local_irq_restore(unsigned long flags);
- #endif /* if defined(CONFIG_CPU_MIPSR2) && !defined(CONFIG_MIPS_MT_SMTC) */
+ #endif /* CONFIG_CPU_MIPSR2 */
extern void smtc_ipi_replay(void);
static inline void arch_local_irq_enable(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of call overhead on each local_irq_enable()
*/
smtc_ipi_replay();
#endif
__asm__ __volatile__(
" .set push \n"
" .set reorder \n"
" .set noat \n"
- #ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 $1, $2, 1 # SMTC - clear TCStatus.IXMT \n"
- " ori $1, 0x400 \n"
- " xori $1, 0x400 \n"
- " mtc0 $1, $2, 1 \n"
- #elif defined(CONFIG_CPU_MIPSR2)
+ #if defined(CONFIG_CPU_MIPSR2)
" ei \n"
#else
" mfc0 $1,$12 \n"
@@ -163,11 +148,7 @@ static inline unsigned long arch_local_save_flags(void)
asm __volatile__(
" .set push \n"
" .set reorder \n"
- #ifdef CONFIG_MIPS_MT_SMTC
- " mfc0 %[flags], $2, 1 \n"
- #else
" mfc0 %[flags], $12 \n"
- #endif
" .set pop \n"
: [flags] "=r" (flags));
@@ -177,14 +158,7 @@ static inline unsigned long arch_local_save_flags(void)
static inline int arch_irqs_disabled_flags(unsigned long flags)
{
- #ifdef CONFIG_MIPS_MT_SMTC
- /*
- * SMTC model uses TCStatus.IXMT to disable interrupts for a thread/CPU
- */
- return flags & 0x400;
- #else
return !(flags & 1);
- #endif
}
#endif /* #ifndef __ASSEMBLY__ */
......
@@ -80,36 +80,6 @@
.endm
.macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
mfc0 t0, CP0_CONFIG
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 1
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 2
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 3
and t0, 1<<2
bnez t0, 0f
9:
/* Assume we came from YAMON... */
PTR_LA v0, 0x9fc00534 /* YAMON print */
lw v0, (v0)
move a0, zero
PTR_LA a1, nonmt_processor
jal v0
PTR_LA v0, 0x9fc00520 /* YAMON exit */
lw v0, (v0)
li a0, 1
jal v0
1: b 1b
__INITDATA
nonmt_processor:
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
#endif
#ifdef CONFIG_EVA
sync
......
@@ -10,37 +10,6 @@
#define __ASM_MACH_MIPS_KERNEL_ENTRY_INIT_H
.macro kernel_entry_setup
#ifdef CONFIG_MIPS_MT_SMTC
mfc0 t0, CP0_CONFIG
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 1
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 2
bgez t0, 9f
mfc0 t0, CP0_CONFIG, 3
and t0, 1<<2
bnez t0, 0f
9 :
/* Assume we came from YAMON... */
PTR_LA v0, 0x9fc00534 /* YAMON print */
lw v0, (v0)
move a0, zero
PTR_LA a1, nonmt_processor
jal v0
PTR_LA v0, 0x9fc00520 /* YAMON exit */
lw v0, (v0)
li a0, 1
jal v0
1 : b 1b
__INITDATA
nonmt_processor :
.asciz "SMTC kernel requires the MT ASE to run\n"
__FINIT
0 :
#endif
.endm
/*
......
/*
- * Definitions and decalrations for MIPS MT support
- * that are common between SMTC, VSMP, and/or AP/SP
- * kernel models.
+ * Definitions and decalrations for MIPS MT support that are common between
+ * the VSMP, and AP/SP kernel models.
*/
#ifndef __ASM_MIPS_MT_H
#define __ASM_MIPS_MT_H
......
@@ -1014,19 +1014,8 @@ do { \
#define write_c0_compare3(val) __write_32bit_c0_register($11, 7, val)
#define read_c0_status() __read_32bit_c0_register($12, 0)
#ifdef CONFIG_MIPS_MT_SMTC
#define write_c0_status(val) \
do { \
__write_32bit_c0_register($12, 0, val); \
__ehb(); \
} while (0)
#else
/*
* Legacy non-SMTC code, which may be hazardous
* but which might not support EHB
*/
#define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
#endif /* CONFIG_MIPS_MT_SMTC */
#define read_c0_cause() __read_32bit_c0_register($13, 0)
#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
@@ -1750,11 +1739,6 @@ static inline void tlb_write_random(void)
/*
* Manipulate bits in a c0 register.
*/
#ifndef CONFIG_MIPS_MT_SMTC
/*
* SMTC Linux requires shutting-down microthread scheduling
* during CP0 register read-modify-write sequences.
*/
#define __BUILD_SET_C0(name) \
static inline unsigned int \
set_c0_##name(unsigned int set) \
@@ -1793,121 +1777,6 @@ change_c0_##name(unsigned int change, unsigned int val) \
return res; \
}
#else /* SMTC versions that manage MT scheduling */
#include <linux/irqflags.h>
/*
* This is a duplicate of dmt() in mipsmtregs.h to avoid problems with
* header file recursion.
*/
static inline unsigned int __dmt(void)
{
int res;
__asm__ __volatile__(
" .set push \n"
" .set mips32r2 \n"
" .set noat \n"
" .word 0x41610BC1 # dmt $1 \n"
" ehb \n"
" move %0, $1 \n"
" .set pop \n"
: "=r" (res));
instruction_hazard();
return res;
}
#define __VPECONTROL_TE_SHIFT 15
#define __VPECONTROL_TE (1UL << __VPECONTROL_TE_SHIFT)
#define __EMT_ENABLE __VPECONTROL_TE
static inline void __emt(unsigned int previous)
{
if ((previous & __EMT_ENABLE))
__asm__ __volatile__(
" .set mips32r2 \n"
" .word 0x41600be1 # emt \n"
" ehb \n"
" .set mips0 \n");
}
static inline void __ehb(void)
{
__asm__ __volatile__(
" .set mips32r2 \n"
" ehb \n" " .set mips0 \n");
}
/*
* Note that local_irq_save/restore affect TC-specific IXMT state,
* not Status.IE as in non-SMTC kernel.
*/
#define __BUILD_SET_C0(name) \
static inline unsigned int \
set_c0_##name(unsigned int set) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
res = read_c0_##name(); \
new = res | set; \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
} \
\
static inline unsigned int \
clear_c0_##name(unsigned int clear) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
omt = __dmt(); \
res = read_c0_##name(); \
new = res & ~clear; \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
} \
\
static inline unsigned int \
change_c0_##name(unsigned int change, unsigned int newbits) \
{ \
unsigned int res; \
unsigned int new; \
unsigned int omt; \
unsigned long flags; \
\
local_irq_save(flags); \
\
omt = __dmt(); \
res = read_c0_##name(); \
new = res & ~change; \
new |= (newbits & change); \
write_c0_##name(new); \
__emt(omt); \
local_irq_restore(flags); \
\
return res; \
}
#endif
__BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
......
@@ -18,10 +18,6 @@
#include <asm/cacheflush.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#endif /* SMTC */
#include <asm-generic/mm_hooks.h>
#define TLBMISS_HANDLER_SETUP_PGD(pgd) \
@@ -63,13 +59,6 @@ extern unsigned long pgd_current[];
#define ASID_INC 0x10
#define ASID_MASK 0xff0
#elif defined(CONFIG_MIPS_MT_SMTC)
#define ASID_INC 0x1
extern unsigned long smtc_asid_mask;
#define ASID_MASK (smtc_asid_mask)
#define HW_ASID_MASK 0xff
/* End SMTC/34K debug hack */
#else /* FIXME: not correct for R6000 */
#define ASID_INC 0x1
@@ -92,7 +81,6 @@ static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
#ifndef CONFIG_MIPS_MT_SMTC
/* Normal, classic MIPS get_new_mmu_context */
static inline void
get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
@@ -115,12 +103,6 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
#else /* CONFIG_MIPS_MT_SMTC */
#define get_new_mmu_context(mm, cpu) smtc_get_new_mmu_context((mm), (cpu))
#endif /* CONFIG_MIPS_MT_SMTC */
/*
* Initialize the context related info for a new mm_struct
* instance.
@@ -141,46 +123,12 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
{
unsigned int cpu = smp_processor_id();
unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
unsigned long mtflags;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
local_irq_save(flags);
mtflags = dvpe();
#else /* Not SMTC */
local_irq_save(flags);
#endif /* CONFIG_MIPS_MT_SMTC */
/* Check if our ASID is of an older version and thus invalid */
if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/*
* If the EntryHi ASID being replaced happens to be
* the value flagged at ASID recycling time as having
* an extended life, clear the bit showing it being
* in use by this "CPU", and if that's the last bit,
* free up the ASID value for use and flush any old
* instances of it from the TLB.
*/
oldasid = (read_c0_entryhi() & ASID_MASK);
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/*
* Tread softly on EntryHi, and so long as we support
* having ASID_MASK smaller than the hardware maximum,
* make sure no "soft" bits become "hard"...
*/
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/*
@@ -213,34 +161,12 @@ activate_mm(struct mm_struct *prev, struct mm_struct *next)
unsigned long flags;
unsigned int cpu = smp_processor_id();
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
unsigned long mtflags;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
/* Unconditionally get a new ASID. */
get_new_mmu_context(next, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
mtflags = dvpe();
oldasid = read_c0_entryhi() & ASID_MASK;
if(smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK) |
cpu_asid(cpu, next));
ehb(); /* Make sure it propagates to TCStatus */
evpe(mtflags);
#else
write_c0_entryhi(cpu_asid(cpu, next));
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
/* mark mmu ownership change */
@@ -258,48 +184,15 @@ static inline void
drop_mmu_context(struct mm_struct *mm, unsigned cpu)
{
unsigned long flags;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long oldasid;
/* Can't use spinlock because called from TLB flush within DVPE */
unsigned int prevvpe;
int mytlb = (smtc_status & SMTC_TLB_SHARED) ? 0 : cpu_data[cpu].vpe_id;
#endif /* CONFIG_MIPS_MT_SMTC */
local_irq_save(flags);
if (cpumask_test_cpu(cpu, mm_cpumask(mm))) {
get_new_mmu_context(mm, cpu);
#ifdef CONFIG_MIPS_MT_SMTC
/* See comments for similar code above */
prevvpe = dvpe();
oldasid = (read_c0_entryhi() & ASID_MASK);
if (smtc_live_asid[mytlb][oldasid]) {
smtc_live_asid[mytlb][oldasid] &= ~(0x1 << cpu);
if(smtc_live_asid[mytlb][oldasid] == 0)
smtc_flush_tlb_asid(oldasid);
}
/* See comments for similar code above */
write_c0_entryhi((read_c0_entryhi() & ~HW_ASID_MASK)
| cpu_asid(cpu, mm));
ehb(); /* Make sure it propagates to TCStatus */
evpe(prevvpe);
#else /* not CONFIG_MIPS_MT_SMTC */
write_c0_entryhi(cpu_asid(cpu, mm));
#endif /* CONFIG_MIPS_MT_SMTC */
} else {
/* will get a new context next time */
#ifndef CONFIG_MIPS_MT_SMTC
cpu_context(cpu, mm) = 0;
#else /* SMTC */
int i;
/* SMTC shares the TLB (and ASIDs) across VPEs */
for_each_online_cpu(i) {
if((smtc_status & SMTC_TLB_SHARED)
|| (cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = 0;
}
#endif /* CONFIG_MIPS_MT_SMTC */
}
local_irq_restore(flags);
}
......
@@ -144,13 +144,7 @@ search_module_dbetables(unsigned long addr)
#define MODULE_KERNEL_TYPE "64BIT "
#endif
#ifdef CONFIG_MIPS_MT_SMTC
#define MODULE_KERNEL_SMTC "MT_SMTC "
#else
#define MODULE_KERNEL_SMTC ""
#endif
#define MODULE_ARCH_VERMAGIC \
- MODULE_PROC_FAMILY MODULE_KERNEL_TYPE MODULE_KERNEL_SMTC
+ MODULE_PROC_FAMILY MODULE_KERNEL_TYPE
#endif /* _ASM_MODULE_H */
@@ -39,9 +39,6 @@ struct pt_regs {
unsigned long cp0_badvaddr;
unsigned long cp0_cause;
unsigned long cp0_epc;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long cp0_tcstatus;
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
unsigned long long mpl[3]; /* MTM{0,1,2} */
unsigned long long mtp[3]; /* MTP{0,1,2} */
......
@@ -43,11 +43,10 @@
: "i" (op), "R" (*(unsigned char *)(addr)))
#ifdef CONFIG_MIPS_MT
/*
- * Temporary hacks for SMTC debug. Optionally force single-threaded
- * execution during I-cache flushes.
+ * Optionally force single-threaded execution during I-cache flushes.
*/
#define PROTECT_CACHE_FLUSHES 1
#ifdef PROTECT_CACHE_FLUSHES
......
#ifndef _ASM_SMTC_MT_H
#define _ASM_SMTC_MT_H
/*
* Definitions for SMTC multitasking on MIPS MT cores
*/
#include <asm/mips_mt.h>
#include <asm/smtc_ipi.h>
/*
* System-wide SMTC status information
*/
extern unsigned int smtc_status;
#define SMTC_TLB_SHARED 0x00000001
#define SMTC_MTC_ACTIVE 0x00000002
/*
* TLB/ASID Management information
*/
#define MAX_SMTC_TLBS 2
#define MAX_SMTC_ASIDS 256
#if NR_CPUS <= 8
typedef char asiduse;
#else
#if NR_CPUS <= 16
typedef short asiduse;
#else
typedef long asiduse;
#endif
#endif
/*
* VPE Management information
*/
#define MAX_SMTC_VPES MAX_SMTC_TLBS /* FIXME: May not always be true. */
extern asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
struct mm_struct;
struct task_struct;
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu);
void self_ipi(struct smtc_ipi *);
void smtc_flush_tlb_asid(unsigned long asid);
extern int smtc_build_cpu_map(int startslot);
extern void smtc_prepare_cpus(int cpus);
extern void smtc_smp_finish(void);
extern void smtc_boot_secondary(int cpu, struct task_struct *t);
extern void smtc_cpus_done(void);
extern void smtc_init_secondary(void);
/*
* Sharing the TLB between multiple VPEs means that the
* "random" index selection function is not allowed to
* select the current value of the Index register. To
* avoid additional TLB pressure, the Index registers
* are "parked" with an non-Valid value.
*/
#define PARKED_INDEX ((unsigned int)0x80000000)
/*
* Define low-level interrupt mask for IPIs, if necessary.
* By default, use SW interrupt 1, which requires no external
* hardware support, but which works only for single-core
* MIPS MT systems.
*/
#ifndef MIPS_CPU_IPI_IRQ
#define MIPS_CPU_IPI_IRQ 1
#endif
#endif /* _ASM_SMTC_MT_H */
/*
* Definitions used in MIPS MT SMTC "Interprocessor Interrupt" code.
*/
#ifndef __ASM_SMTC_IPI_H
#define __ASM_SMTC_IPI_H
#include <linux/spinlock.h>
//#define SMTC_IPI_DEBUG
#ifdef SMTC_IPI_DEBUG
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#endif /* SMTC_IPI_DEBUG */
/*
* An IPI "message"
*/
struct smtc_ipi {
struct smtc_ipi *flink;
int type;
void *arg;
int dest;
#ifdef SMTC_IPI_DEBUG
int sender;
long stamp;
#endif /* SMTC_IPI_DEBUG */
};
/*
* Defined IPI Types
*/
#define LINUX_SMP_IPI 1
#define SMTC_CLOCK_TICK 2
#define IRQ_AFFINITY_IPI 3
/*
* A queue of IPI messages
*/
struct smtc_ipi_q {
struct smtc_ipi *head;
spinlock_t lock;
struct smtc_ipi *tail;
int depth;
int resched_flag; /* reschedule already queued */
};
static inline void smtc_ipi_nq(struct smtc_ipi_q *q, struct smtc_ipi *p)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->head == NULL)
q->head = q->tail = p;
else
q->tail->flink = p;
p->flink = NULL;
q->tail = p;
q->depth++;
#ifdef SMTC_IPI_DEBUG
p->sender = read_c0_tcbind();
p->stamp = read_c0_count();
#endif /* SMTC_IPI_DEBUG */
spin_unlock_irqrestore(&q->lock, flags);
}
static inline struct smtc_ipi *__smtc_ipi_dq(struct smtc_ipi_q *q)
{
struct smtc_ipi *p;
if (q->head == NULL)
p = NULL;
else {
p = q->head;
q->head = q->head->flink;
q->depth--;
/* Arguably unnecessary, but leaves queue cleaner */
if (q->head == NULL)
q->tail = NULL;
}
return p;
}
static inline struct smtc_ipi *smtc_ipi_dq(struct smtc_ipi_q *q)
{
unsigned long flags;
struct smtc_ipi *p;
spin_lock_irqsave(&q->lock, flags);
p = __smtc_ipi_dq(q);
spin_unlock_irqrestore(&q->lock, flags);
return p;
}
static inline void smtc_ipi_req(struct smtc_ipi_q *q, struct smtc_ipi *p)
{
unsigned long flags;
spin_lock_irqsave(&q->lock, flags);
if (q->head == NULL) {
q->head = q->tail = p;
p->flink = NULL;
} else {
p->flink = q->head;
q->head = p;
}
q->depth++;
spin_unlock_irqrestore(&q->lock, flags);
}
static inline int smtc_ipi_qdepth(struct smtc_ipi_q *q)
{
unsigned long flags;
int retval;
spin_lock_irqsave(&q->lock, flags);
retval = q->depth;
spin_unlock_irqrestore(&q->lock, flags);
return retval;
}
extern void smtc_send_ipi(int cpu, int type, unsigned int action);
#endif /* __ASM_SMTC_IPI_H */
/*
* Definitions for SMTC /proc entries
* Copyright(C) 2005 MIPS Technologies Inc.
*/
#ifndef __ASM_SMTC_PROC_H
#define __ASM_SMTC_PROC_H
/*
* per-"CPU" statistics
*/
struct smtc_cpu_proc {
unsigned long timerints;
unsigned long selfipis;
};
extern struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
/* Count of number of recoveries of "stolen" FPU access rights on 34K */
extern atomic_t smtc_fpu_recoveries;
#endif /* __ASM_SMTC_PROC_H */
@@ -19,22 +19,12 @@
#include <asm/asm-offsets.h>
#include <asm/thread_info.h>
- /*
- * For SMTC kernel, global IE should be left set, and interrupts
- * controlled exclusively via IXMT.
- */
- #ifdef CONFIG_MIPS_MT_SMTC
- #define STATMASK 0x1e
- #elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+ #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
#define STATMASK 0x3f
#else
#define STATMASK 0x1f
#endif
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
.macro SAVE_AT
.set push
.set noat
@@ -186,16 +176,6 @@
mfc0 v1, CP0_STATUS
LONG_S $2, PT_R2(sp)
LONG_S v1, PT_STATUS(sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Ideally, these instructions would be shuffled in
* to cover the pipeline delay.
*/
.set mips32
mfc0 k0, CP0_TCSTATUS
.set mips0
LONG_S k0, PT_TCSTATUS(sp)
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_S $4, PT_R4(sp)
mfc0 v1, CP0_CAUSE
LONG_S $5, PT_R5(sp)
@@ -321,36 +301,6 @@
.set push
.set reorder
.set noat
#ifdef CONFIG_MIPS_MT_SMTC
.set mips32r2
/*
* We need to make sure the read-modify-write
* of Status below isn't perturbed by an interrupt
* or cross-TC access, so we need to do at least a DMT,
* protected by an interrupt-inhibit. But setting IXMT
* also creates a few-cycle window where an IPI could
* be queued and not be detected before potentially
* returning to a WAIT or user-mode loop. It must be
* replayed.
*
* We're in the middle of a context switch, and
* we can't dispatch it directly without trashing
* some registers, so we'll try to detect this unlikely
* case and program a software interrupt in the VPE,
* as would be done for a cross-VPE IPI. To accommodate
* the handling of that case, we're doing a DVPE instead
* of just a DMT here to protect against other threads.
* This is a lot of cruft to cover a tiny window.
* If you can find a better design, implement it!
*
*/
mfc0 v0, CP0_TCSTATUS
ori v0, TCSTATUS_IXMT
mtc0 v0, CP0_TCSTATUS
_ehb
DVPE 5 # dvpe a1
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 a0, CP0_STATUS
ori a0, STATMASK
xori a0, STATMASK
@@ -362,59 +312,6 @@
and v0, v1
or v0, a0
mtc0 v0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Only after EXL/ERL have been restored to status can we
* restore TCStatus.IXMT.
*/
LONG_L v1, PT_TCSTATUS(sp)
_ehb
mfc0 a0, CP0_TCSTATUS
andi v1, TCSTATUS_IXMT
bnez v1, 0f
/*
* We'd like to detect any IPIs queued in the tiny window
* above and request an software interrupt to service them
* when we ERET.
*
* Computing the offset into the IPIQ array of the executing
* TC's IPI queue in-line would be tedious. We use part of
* the TCContext register to hold 16 bits of offset that we
* can add in-line to find the queue head.
*/
mfc0 v0, CP0_TCCONTEXT
la a2, IPIQ
srl v0, v0, 16
addu a2, a2, v0
LONG_L v0, 0(a2)
beqz v0, 0f
/*
* If we have a queue, provoke dispatch within the VPE by setting C_SW1
*/
mfc0 v0, CP0_CAUSE
ori v0, v0, C_SW1
mtc0 v0, CP0_CAUSE
0:
/*
* This test should really never branch but
* let's be prudent here. Having atomized
* the shared register modifications, we can
* now EVPE, and must do so before interrupts
* are potentially re-enabled.
*/
andi a1, a1, MVPCONTROL_EVP
beqz a1, 1f
evpe
1:
/* We know that TCStatua.IXMT should be set from above */
xori a0, a0, TCSTATUS_IXMT
or a0, a0, v1
mtc0 a0, CP0_TCSTATUS
_ehb
.set mips0
#endif /* CONFIG_MIPS_MT_SMTC */
LONG_L v1, PT_EPC(sp)
MTC0 v1, CP0_EPC
LONG_L $31, PT_R31(sp)
@@ -467,33 +364,11 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro CLI
#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | STATMASK
or t0, t1
xori t0, STATMASK
mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
/*
* For SMTC, we need to set privilege
* and disable interrupts only for the
* current TC, using the TCStatus register.
*/
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU, leave IXMT */
xori t0, 0x00001800
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
ori t0, ST0_EXL | ST0_ERL
xori t0, ST0_EXL | ST0_ERL
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
irq_disable_hazard
.endm
@@ -502,35 +377,11 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro STI
#if !defined(CONFIG_MIPS_MT_SMTC)
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | STATMASK
or t0, t1
xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS
#else /* CONFIG_MIPS_MT_SMTC */
/*
* For SMTC, we need to set privilege
* and enable interrupts only for the
* current TC, using the TCStatus register.
*/
_ehb
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU *and* IXMT */
xori t0, 0x00001c00
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
ori t0, ST0_EXL
xori t0, ST0_EXL
mtc0 t0, CP0_STATUS
/* irq_enable_hazard below should expand to EHB for 24K/34K cpus */
#endif /* CONFIG_MIPS_MT_SMTC */
irq_enable_hazard
.endm
@@ -540,32 +391,6 @@
* Set cp0 enable bit as sign that we're running on the kernel stack
*/
.macro KMODE
#ifdef CONFIG_MIPS_MT_SMTC
/*
* This gets baroque in SMTC. We want to
* protect the non-atomic clearing of EXL
* with DMT/EMT, but we don't want to take
* an interrupt while DMT is still in effect.
*/
/* KMODE gets invoked from both reorder and noreorder code */
.set push
.set mips32r2
.set noreorder
mfc0 v0, CP0_TCSTATUS
andi v1, v0, TCSTATUS_IXMT
ori v0, TCSTATUS_IXMT
mtc0 v0, CP0_TCSTATUS
_ehb
DMT 2 # dmt v0
/*
* We don't know a priori if ra is "live"
*/
move t0, ra
jal mips_ihb
nop /* delay slot */
move ra, t0
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t0, CP0_STATUS
li t1, ST0_CU0 | (STATMASK & ~1)
#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
@@ -576,25 +401,6 @@
or t0, t1
xori t0, STATMASK & ~1
mtc0 t0, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
_ehb
andi v0, v0, VPECONTROL_TE
beqz v0, 2f
nop /* delay slot */
emt
2:
mfc0 v0, CP0_TCSTATUS
/* Clear IXMT, then OR in previous value */
ori v0, TCSTATUS_IXMT
xori v0, TCSTATUS_IXMT
or v0, v1, v0
mtc0 v0, CP0_TCSTATUS
/*
* irq_disable_hazard below should expand to EHB
* on 24K/34K CPUS
*/
.set pop
#endif /* CONFIG_MIPS_MT_SMTC */
irq_disable_hazard
.endm
......
@@ -159,11 +159,7 @@ static inline struct thread_info *current_thread_info(void)
* We stash processor id into a COP0 register to retrieve it fast
* at kernel exception entry.
*/
- #if defined(CONFIG_MIPS_MT_SMTC)
- #define SMP_CPUID_REG 2, 2 /* TCBIND */
- #define ASM_SMP_CPUID_REG $2, 2
- #define SMP_CPUID_PTRSHIFT 19
- #elif defined(CONFIG_MIPS_PGD_C0_CONTEXT)
+ #if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
#define SMP_CPUID_REG 20, 0 /* XCONTEXT */
#define ASM_SMP_CPUID_REG $20
#define SMP_CPUID_PTRSHIFT 48
@@ -179,13 +175,8 @@ static inline struct thread_info *current_thread_info(void)
#define SMP_CPUID_REGSHIFT (SMP_CPUID_PTRSHIFT + 2)
#endif
#ifdef CONFIG_MIPS_MT_SMTC
#define ASM_CPUID_MFC0 mfc0
#define UASM_i_CPUID_MFC0 uasm_i_mfc0
#else
#define ASM_CPUID_MFC0 MFC0
#define UASM_i_CPUID_MFC0 UASM_i_MFC0
#endif
#endif /* __KERNEL__ */
#endif /* _ASM_THREAD_INFO_H */
@@ -52,14 +52,11 @@ extern int (*perf_irq)(void);
*/
extern unsigned int __weak get_c0_compare_int(void);
extern int r4k_clockevent_init(void);
extern int smtc_clockevent_init(void);
extern int gic_clockevent_init(void);
static inline int mips_clockevent_init(void)
{
- #ifdef CONFIG_MIPS_MT_SMTC
- return smtc_clockevent_init();
- #elif defined(CONFIG_CEVT_GIC)
+ #if defined(CONFIG_CEVT_GIC)
return (gic_clockevent_init() | r4k_clockevent_init());
#elif defined(CONFIG_CEVT_R4K)
return r4k_clockevent_init();
......
@@ -17,7 +17,6 @@ endif
obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
obj-$(CONFIG_MIPS_MT_SMTC) += cevt-smtc.o
obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
obj-$(CONFIG_CEVT_GIC) += cevt-gic.o
obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
@@ -50,7 +49,6 @@ obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
obj-$(CONFIG_MIPS_MT_SMTC) += smtc.o smtc-asm.o smtc-proc.o
obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
......
@@ -64,9 +64,6 @@ void output_ptreg_defines(void)
OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
OFFSET(PT_STATUS, pt_regs, cp0_status);
OFFSET(PT_CAUSE, pt_regs, cp0_cause);
#ifdef CONFIG_MIPS_MT_SMTC
OFFSET(PT_TCSTATUS, pt_regs, cp0_tcstatus);
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_CPU_CAVIUM_OCTEON
OFFSET(PT_MPL, pt_regs, mpl);
OFFSET(PT_MTP, pt_regs, mtp);
......
@@ -12,17 +12,10 @@
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
#include <asm/gic.h>
/*
* The SMTC Kernel for the 34K, 1004K, et. al. replaces several
* of these routines with SMTC-specific variants.
*/
#ifndef CONFIG_MIPS_MT_SMTC
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
@@ -36,8 +29,6 @@ static int mips_next_event(unsigned long delta,
return res;
}
#endif /* CONFIG_MIPS_MT_SMTC */
void mips_set_clock_mode(enum clock_event_mode mode,
struct clock_event_device *evt)
{
@@ -47,7 +38,6 @@ void mips_set_clock_mode(enum clock_event_mode mode,
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
#ifndef CONFIG_MIPS_MT_SMTC
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
const int r2 = cpu_has_mips_r2;
@@ -82,8 +72,6 @@ irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
return IRQ_HANDLED;
}
#endif /* Not CONFIG_MIPS_MT_SMTC */
struct irqaction c0_compare_irqaction = {
.handler = c0_compare_interrupt,
.flags = IRQF_PERCPU | IRQF_TIMER,
@@ -170,7 +158,6 @@ int c0_compare_int_usable(void)
return 1;
}
#ifndef CONFIG_MIPS_MT_SMTC
int r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
@@ -225,4 +212,3 @@ int r4k_clockevent_init(void)
return 0;
}
#endif /* Not CONFIG_MIPS_MT_SMTC */
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
* Copyright (C) 2007 MIPS Technologies, Inc.
* Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
* Copyright (C) 2008 Kevin D. Kissell, Paralogos sarl
*/
#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/smp.h>
#include <linux/irq.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h>
#include <asm/cevt-r4k.h>
/*
* Variant clock event timer support for SMTC on MIPS 34K, 1004K
* or other MIPS MT cores.
*
* Notes on SMTC Support:
*
* SMTC has multiple microthread TCs pretending to be Linux CPUs.
* But there's only one Count/Compare pair per VPE, and Compare
* interrupts are taken opportunisitically by available TCs
* bound to the VPE with the Count register. The new timer
* framework provides for global broadcasts, but we really
* want VPE-level multicasts for best behavior. So instead
* of invoking the high-level clock-event broadcast code,
* this version of SMTC support uses the historical SMTC
* multicast mechanisms "under the hood", appearing to the
* generic clock layer as if the interrupts are per-CPU.
*
* The approach taken here is to maintain a set of NR_CPUS
* virtual timers, and track which "CPU" needs to be alerted
* at each event.
*
* It's unlikely that we'll see a MIPS MT core with more than
* 2 VPEs, but we *know* that we won't need to handle more
* VPEs than we have "CPUs". So NCPUs arrays of NCPUs elements
* is always going to be overkill, but always going to be enough.
*/
unsigned long smtc_nexttime[NR_CPUS][NR_CPUS];
static int smtc_nextinvpe[NR_CPUS];
/*
* Timestamps stored are absolute values to be programmed
* into Count register. Valid timestamps will never be zero.
* If a Zero Count value is actually calculated, it is converted
* to be a 1, which will introduce 1 or two CPU cycles of error
* roughly once every four billion events, which at 1000 HZ means
* about once every 50 days. If that's actually a problem, one
* could alternate squashing 0 to 1 and to -1.
*/
#define MAKEVALID(x) (((x) == 0L) ? 1L : (x))
#define ISVALID(x) ((x) != 0L)
/*
* Time comparison is subtle, as it's really truncated
* modular arithmetic.
*/
#define IS_SOONER(a, b, reference) \
(((a) - (unsigned long)(reference)) < ((b) - (unsigned long)(reference)))
/*
* CATCHUP_INCREMENT, used when the function falls behind the counter.
* Could be an increasing function instead of a constant;
*/
#define CATCHUP_INCREMENT 64
static int mips_next_event(unsigned long delta,
struct clock_event_device *evt)
{
unsigned long flags;
unsigned int mtflags;
unsigned long timestamp, reference, previous;
unsigned long nextcomp = 0L;
int vpe = current_cpu_data.vpe_id;
int cpu = smp_processor_id();
local_irq_save(flags);
mtflags = dmt();
/*
* Maintain the per-TC virtual timer
* and program the per-VPE shared Count register
* as appropriate here...
*/
reference = (unsigned long)read_c0_count();
timestamp = MAKEVALID(reference + delta);
/*
* To really model the clock, we have to catch the case
* where the current next-in-VPE timestamp is the old
* timestamp for the calling CPE, but the new value is
* in fact later. In that case, we have to do a full
* scan and discover the new next-in-VPE CPU id and
* timestamp.
*/
previous = smtc_nexttime[vpe][cpu];
if (cpu == smtc_nextinvpe[vpe] && ISVALID(previous)
&& IS_SOONER(previous, timestamp, reference)) {
int i;
int soonest = cpu;
/*
* Update timestamp array here, so that new
* value gets considered along with those of
* other virtual CPUs on the VPE.
*/
smtc_nexttime[vpe][cpu] = timestamp;
for_each_online_cpu(i) {
if (ISVALID(smtc_nexttime[vpe][i])
&& IS_SOONER(smtc_nexttime[vpe][i],
smtc_nexttime[vpe][soonest], reference)) {
soonest = i;
}
}
smtc_nextinvpe[vpe] = soonest;
nextcomp = smtc_nexttime[vpe][soonest];
/*
* Otherwise, we don't have to process the whole array rank,
* we just have to see if the event horizon has gotten closer.
*/
} else {
if (!ISVALID(smtc_nexttime[vpe][smtc_nextinvpe[vpe]]) ||
IS_SOONER(timestamp,
smtc_nexttime[vpe][smtc_nextinvpe[vpe]], reference)) {
smtc_nextinvpe[vpe] = cpu;
nextcomp = timestamp;
}
/*
* Since next-in-VPE may me the same as the executing
* virtual CPU, we update the array *after* checking
* its value.
*/
smtc_nexttime[vpe][cpu] = timestamp;
}
/*
* It may be that, in fact, we don't need to update Compare,
* but if we do, we want to make sure we didn't fall into
* a crack just behind Count.
*/
if (ISVALID(nextcomp)) {
write_c0_compare(nextcomp);
ehb();
/*
* We never return an error, we just make sure
* that we trigger the handlers as quickly as
* we can if we fell behind.
*/
while ((nextcomp - (unsigned long)read_c0_count())
> (unsigned long)LONG_MAX) {
nextcomp += CATCHUP_INCREMENT;
write_c0_compare(nextcomp);
ehb();
}
}
emt(mtflags);
local_irq_restore(flags);
return 0;
}
void smtc_distribute_timer(int vpe)
{
unsigned long flags;
unsigned int mtflags;
int cpu;
struct clock_event_device *cd;
unsigned long nextstamp;
unsigned long reference;
repeat:
nextstamp = 0L;
for_each_online_cpu(cpu) {
/*
* Find virtual CPUs within the current VPE who have
* unserviced timer requests whose time is now past.
*/
local_irq_save(flags);
mtflags = dmt();
if (cpu_data[cpu].vpe_id == vpe &&
ISVALID(smtc_nexttime[vpe][cpu])) {
reference = (unsigned long)read_c0_count();
if ((smtc_nexttime[vpe][cpu] - reference)
> (unsigned long)LONG_MAX) {
smtc_nexttime[vpe][cpu] = 0L;
emt(mtflags);
local_irq_restore(flags);
/*
* We don't send IPIs to ourself.
*/
if (cpu != smp_processor_id()) {
smtc_send_ipi(cpu, SMTC_CLOCK_TICK, 0);
} else {
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
}
} else {
/* Local to VPE but Valid Time not yet reached. */
if (!ISVALID(nextstamp) ||
IS_SOONER(smtc_nexttime[vpe][cpu], nextstamp,
reference)) {
smtc_nextinvpe[vpe] = cpu;
nextstamp = smtc_nexttime[vpe][cpu];
}
emt(mtflags);
local_irq_restore(flags);
}
} else {
emt(mtflags);
local_irq_restore(flags);
}
}
/* Reprogram for interrupt at next soonest timestamp for VPE */
if (ISVALID(nextstamp)) {
write_c0_compare(nextstamp);
ehb();
if ((nextstamp - (unsigned long)read_c0_count())
> (unsigned long)LONG_MAX)
goto repeat;
}
}
irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
{
int cpu = smp_processor_id();
/* If we're running SMTC, we've got MIPS MT and therefore MIPS32R2 */
handle_perf_irq(1);
if (read_c0_cause() & (1 << 30)) {
/* Clear Count/Compare Interrupt */
write_c0_compare(read_c0_compare());
smtc_distribute_timer(cpu_data[cpu].vpe_id);
}
return IRQ_HANDLED;
}
int smtc_clockevent_init(void)
{
uint64_t mips_freq = mips_hpt_frequency;
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
unsigned int irq;
int i;
int j;
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
if (cpu == 0) {
for (i = 0; i < num_possible_cpus(); i++) {
smtc_nextinvpe[i] = 0;
for (j = 0; j < num_possible_cpus(); j++)
smtc_nexttime[i][j] = 0L;
}
/*
		 * SMTC also can't have the usability test
* run by secondary TCs once Compare is in use.
*/
if (!c0_compare_int_usable())
return -ENXIO;
}
/*
* With vectored interrupts things are getting platform specific.
* get_c0_compare_int is a hook to allow a platform to return the
	 * interrupt number of its liking.
*/
irq = MIPS_CPU_IRQ_BASE + cp0_compare_irq;
if (get_c0_compare_int)
irq = get_c0_compare_int();
cd = &per_cpu(mips_clockevent_device, cpu);
cd->name = "MIPS";
cd->features = CLOCK_EVT_FEAT_ONESHOT;
/* Calculate the min / max delta */
cd->mult = div_sc((unsigned long) mips_freq, NSEC_PER_SEC, 32);
cd->shift = 32;
cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
cd->rating = 300;
cd->irq = irq;
cd->cpumask = cpumask_of(cpu);
cd->set_next_event = mips_next_event;
cd->set_mode = mips_set_clock_mode;
cd->event_handler = mips_event_handler;
clockevents_register_device(cd);
/*
* On SMTC we only want to do the data structure
* initialization and IRQ setup once.
*/
if (cpu)
return 0;
/*
* And we need the hwmask associated with the c0_compare
* vector to be initialized.
*/
irq_hwmask[irq] = (0x100 << cp0_compare_irq);
if (cp0_timer_irq_installed)
return 0;
cp0_timer_irq_installed = 1;
setup_irq(irq, &c0_compare_irqaction);
return 0;
}
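/*
 * A minimal, standalone sketch (not kernel code) of the mult/shift
 * arithmetic configured above: mult = (freq << shift) / NSEC_PER_SEC
 * (as computed by div_sc()), so a delay in nanoseconds maps to Count
 * ticks via (ns * mult) >> shift, with clockevent_delta2ns() being
 * roughly the inverse. The demo_* names and the 100 MHz figure are
 * illustrative assumptions only.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_NSEC_PER_SEC 1000000000ULL

static uint64_t demo_mult(uint64_t freq_hz, unsigned int shift)
{
	return (freq_hz << shift) / DEMO_NSEC_PER_SEC;
}

static uint64_t demo_ns_to_ticks(uint64_t ns, uint64_t mult, unsigned int shift)
{
	return (ns * mult) >> shift;
}

int main(void)
{
	uint64_t freq = 100000000ULL;	/* assume a 100 MHz Count rate */
	unsigned int shift = 32;
	uint64_t mult = demo_mult(freq, shift);

	/* 1 ms of delay should come out just under 100000 Count ticks. */
	printf("mult = %llu, 1ms -> %llu ticks\n",
	       (unsigned long long)mult,
	       (unsigned long long)demo_ns_to_ticks(1000000, mult, shift));
	return 0;
}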
...@@ -62,7 +62,7 @@ static inline void check_errata(void) ...@@ -62,7 +62,7 @@ static inline void check_errata(void)
case CPU_34K: case CPU_34K:
/* /*
* Erratum "RPS May Cause Incorrect Instruction Execution" * Erratum "RPS May Cause Incorrect Instruction Execution"
* This code only handles VPE0, any SMP/SMTC/RTOS code * This code only handles VPE0, any SMP/RTOS code
* making use of VPE1 will be responsable for that VPE. * making use of VPE1 will be responsable for that VPE.
*/ */
if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2) if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
......
...@@ -16,9 +16,6 @@ ...@@ -16,9 +16,6 @@
#include <asm/isadep.h> #include <asm/isadep.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/war.h> #include <asm/war.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif
#ifndef CONFIG_PREEMPT #ifndef CONFIG_PREEMPT
#define resume_kernel restore_all #define resume_kernel restore_all
...@@ -89,41 +86,6 @@ FEXPORT(syscall_exit) ...@@ -89,41 +86,6 @@ FEXPORT(syscall_exit)
bnez t0, syscall_exit_work bnez t0, syscall_exit_work
restore_all: # restore full frame restore_all: # restore full frame
#ifdef CONFIG_MIPS_MT_SMTC
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
/* Re-arm any temporarily masked interrupts not explicitly "acked" */
mfc0 v0, CP0_TCSTATUS
ori v1, v0, TCSTATUS_IXMT
mtc0 v1, CP0_TCSTATUS
andi v0, TCSTATUS_IXMT
_ehb
mfc0 t0, CP0_TCCONTEXT
DMT 9 # dmt t1
jal mips_ihb
mfc0 t2, CP0_STATUS
andi t3, t0, 0xff00
or t2, t2, t3
mtc0 t2, CP0_STATUS
_ehb
andi t1, t1, VPECONTROL_TE
beqz t1, 1f
EMT
1:
mfc0 v1, CP0_TCSTATUS
/* We set IXMT above, XOR should clear it here */
xori v1, v1, TCSTATUS_IXMT
or v1, v0, v1
mtc0 v1, CP0_TCSTATUS
_ehb
xor t0, t0, t3
mtc0 t0, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
/* Detect and execute deferred IPI "interrupts" */
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
jal deferred_smtc_ipi
LONG_S s0, TI_REGS($28)
#endif /* CONFIG_MIPS_MT_SMTC */
.set noat .set noat
RESTORE_TEMP RESTORE_TEMP
RESTORE_AT RESTORE_AT
......
...@@ -21,20 +21,6 @@ ...@@ -21,20 +21,6 @@
#include <asm/war.h> #include <asm/war.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#ifdef CONFIG_MIPS_MT_SMTC
#define PANIC_PIC(msg) \
.set push; \
.set nomicromips; \
.set reorder; \
PTR_LA a0,8f; \
.set noat; \
PTR_LA AT, panic; \
jr AT; \
9: b 9b; \
.set pop; \
TEXT(msg)
#endif
__INIT __INIT
/* /*
...@@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp) ...@@ -251,15 +237,6 @@ NESTED(except_vec_vi, 0, sp)
SAVE_AT SAVE_AT
.set push .set push
.set noreorder .set noreorder
#ifdef CONFIG_MIPS_MT_SMTC
/*
* To keep from blindly blocking *all* interrupts
* during service by SMTC kernel, we also want to
* pass the IM value to be cleared.
*/
FEXPORT(except_vec_vi_mori)
ori a0, $0, 0
#endif /* CONFIG_MIPS_MT_SMTC */
PTR_LA v1, except_vec_vi_handler PTR_LA v1, except_vec_vi_handler
FEXPORT(except_vec_vi_lui) FEXPORT(except_vec_vi_lui)
lui v0, 0 /* Patched */ lui v0, 0 /* Patched */
...@@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end) ...@@ -277,37 +254,10 @@ EXPORT(except_vec_vi_end)
NESTED(except_vec_vi_handler, 0, sp) NESTED(except_vec_vi_handler, 0, sp)
SAVE_TEMP SAVE_TEMP
SAVE_STATIC SAVE_STATIC
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC has an interesting problem that interrupts are level-triggered,
* and the CLI macro will clear EXL, potentially causing a duplicate
* interrupt service invocation. So we need to clear the associated
* IM bit of Status prior to doing CLI, and restore it after the
* service routine has been invoked - we must assume that the
* service routine will have cleared the state, and any active
* level represents a new or otherwised unserviced event...
*/
mfc0 t1, CP0_STATUS
and t0, a0, t1
#ifdef CONFIG_MIPS_MT_SMTC_IM_BACKSTOP
mfc0 t2, CP0_TCCONTEXT
or t2, t0, t2
mtc0 t2, CP0_TCCONTEXT
#endif /* CONFIG_MIPS_MT_SMTC_IM_BACKSTOP */
xor t1, t1, t0
mtc0 t1, CP0_STATUS
_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
CLI CLI
#ifdef CONFIG_TRACE_IRQFLAGS #ifdef CONFIG_TRACE_IRQFLAGS
move s0, v0 move s0, v0
#ifdef CONFIG_MIPS_MT_SMTC
move s1, a0
#endif
TRACE_IRQS_OFF TRACE_IRQS_OFF
#ifdef CONFIG_MIPS_MT_SMTC
move a0, s1
#endif
move v0, s0 move v0, s0
#endif #endif
...@@ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp) ...@@ -496,9 +446,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
.align 5 .align 5
LEAF(handle_ri_rdhwr_vivt) LEAF(handle_ri_rdhwr_vivt)
#ifdef CONFIG_MIPS_MT_SMTC
PANIC_PIC("handle_ri_rdhwr_vivt called")
#else
.set push .set push
.set noat .set noat
.set noreorder .set noreorder
...@@ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp) ...@@ -517,7 +464,6 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set pop .set pop
bltz k1, handle_ri /* slow path */ bltz k1, handle_ri /* slow path */
/* fall thru */ /* fall thru */
#endif
END(handle_ri_rdhwr_vivt) END(handle_ri_rdhwr_vivt)
LEAF(handle_ri_rdhwr) LEAF(handle_ri_rdhwr)
......
...@@ -35,33 +35,12 @@ ...@@ -35,33 +35,12 @@
*/ */
.macro setup_c0_status set clr .macro setup_c0_status set clr
.set push .set push
#ifdef CONFIG_MIPS_MT_SMTC
/*
* For SMTC, we need to set privilege and disable interrupts only for
* the current TC, using the TCStatus register.
*/
mfc0 t0, CP0_TCSTATUS
/* Fortunately CU 0 is in the same place in both registers */
/* Set TCU0, TMX, TKSU (for later inversion) and IXMT */
li t1, ST0_CU0 | 0x08001c00
or t0, t1
/* Clear TKSU, leave IXMT */
xori t0, 0x00001800
mtc0 t0, CP0_TCSTATUS
_ehb
/* We need to leave the global IE bit set, but clear EXL...*/
mfc0 t0, CP0_STATUS
or t0, ST0_CU0 | ST0_EXL | ST0_ERL | \set | \clr
xor t0, ST0_EXL | ST0_ERL | \clr
mtc0 t0, CP0_STATUS
#else
mfc0 t0, CP0_STATUS mfc0 t0, CP0_STATUS
or t0, ST0_CU0|\set|0x1f|\clr or t0, ST0_CU0|\set|0x1f|\clr
xor t0, 0x1f|\clr xor t0, 0x1f|\clr
mtc0 t0, CP0_STATUS mtc0 t0, CP0_STATUS
.set noreorder .set noreorder
sll zero,3 # ehb sll zero,3 # ehb
#endif
.set pop .set pop
.endm .endm
...@@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point ...@@ -115,24 +94,6 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0 jr t0
0: 0:
#ifdef CONFIG_MIPS_MT_SMTC
/*
* In SMTC kernel, "CLI" is thread-specific, in TCStatus.
* We still need to enable interrupts globally in Status,
* and clear EXL/ERL.
*
* TCContext is used to track interrupt levels under
* service in SMTC kernel. Clear for boot TC before
* allowing any interrupts.
*/
mtc0 zero, CP0_TCCONTEXT
mfc0 t0, CP0_STATUS
ori t0, t0, 0xff1f
xori t0, t0, 0x001e
mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
PTR_LA t0, __bss_start # clear .bss PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0) LONG_S zero, (t0)
PTR_LA t1, __bss_stop - LONGSIZE PTR_LA t1, __bss_stop - LONGSIZE
...@@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point ...@@ -164,25 +125,8 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
* function after setting up the stack and gp registers. * function after setting up the stack and gp registers.
*/ */
NESTED(smp_bootstrap, 16, sp) NESTED(smp_bootstrap, 16, sp)
#ifdef CONFIG_MIPS_MT_SMTC
/*
* Read-modify-writes of Status must be atomic, and this
* is one case where CLI is invoked without EXL being
* necessarily set. The CLI and setup_c0_status will
* in fact be redundant for all but the first TC of
* each VPE being booted.
*/
DMT 10 # dmt t2 /* t0, t1 are used by CLI and setup_c0_status() */
jal mips_ihb
#endif /* CONFIG_MIPS_MT_SMTC */
smp_slave_setup smp_slave_setup
setup_c0_status_sec setup_c0_status_sec
#ifdef CONFIG_MIPS_MT_SMTC
andi t2, t2, VPECONTROL_TE
beqz t2, 2f
EMT # emt
2:
#endif /* CONFIG_MIPS_MT_SMTC */
j start_secondary j start_secondary
END(smp_bootstrap) END(smp_bootstrap)
#endif /* CONFIG_SMP */ #endif /* CONFIG_SMP */
...@@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = { ...@@ -42,9 +42,6 @@ static struct irq_chip i8259A_chip = {
.irq_disable = disable_8259A_irq, .irq_disable = disable_8259A_irq,
.irq_unmask = enable_8259A_irq, .irq_unmask = enable_8259A_irq,
.irq_mask_ack = mask_and_ack_8259A, .irq_mask_ack = mask_and_ack_8259A,
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
.irq_set_affinity = plat_set_irq_affinity,
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
}; };
/* /*
...@@ -180,7 +177,6 @@ static void mask_and_ack_8259A(struct irq_data *d) ...@@ -180,7 +177,6 @@ static void mask_and_ack_8259A(struct irq_data *d)
outb(cached_master_mask, PIC_MASTER_IMR); outb(cached_master_mask, PIC_MASTER_IMR);
outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */ outb(0x60+irq, PIC_MASTER_CMD); /* 'Specific EOI to master */
} }
smtc_im_ack_irq(irq);
raw_spin_unlock_irqrestore(&i8259A_lock, flags); raw_spin_unlock_irqrestore(&i8259A_lock, flags);
return; return;
......
...@@ -229,18 +229,8 @@ void __init check_wait(void) ...@@ -229,18 +229,8 @@ void __init check_wait(void)
} }
} }
static void smtc_idle_hook(void)
{
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_idle_loop_hook(void);
smtc_idle_loop_hook();
#endif
}
void arch_cpu_idle(void) void arch_cpu_idle(void)
{ {
smtc_idle_hook();
if (cpu_wait) if (cpu_wait)
cpu_wait(); cpu_wait();
else else
......
...@@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d) ...@@ -53,13 +53,9 @@ static inline void unmask_msc_irq(struct irq_data *d)
*/ */
static void level_mask_and_ack_msc_irq(struct irq_data *d) static void level_mask_and_ack_msc_irq(struct irq_data *d)
{ {
unsigned int irq = d->irq;
mask_msc_irq(d); mask_msc_irq(d);
if (!cpu_has_veic) if (!cpu_has_veic)
MSCIC_WRITE(MSC01_IC_EOI, 0); MSCIC_WRITE(MSC01_IC_EOI, 0);
/* This actually needs to be a call into platform code */
smtc_im_ack_irq(irq);
} }
/* /*
...@@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d) ...@@ -78,7 +74,6 @@ static void edge_mask_and_ack_msc_irq(struct irq_data *d)
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT); MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
MSCIC_WRITE(MSC01_IC_SUP+irq*8, r); MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
} }
smtc_im_ack_irq(irq);
} }
/* /*
......
...@@ -73,7 +73,6 @@ void free_irqno(unsigned int irq) ...@@ -73,7 +73,6 @@ void free_irqno(unsigned int irq)
*/ */
void ack_bad_irq(unsigned int irq) void ack_bad_irq(unsigned int irq)
{ {
smtc_im_ack_irq(irq);
printk("unexpected IRQ # %d\n", irq); printk("unexpected IRQ # %d\n", irq);
} }
...@@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq) ...@@ -142,23 +141,7 @@ void __irq_entry do_IRQ(unsigned int irq)
{ {
irq_enter(); irq_enter();
check_stack_overflow(); check_stack_overflow();
if (!smtc_handle_on_other_cpu(irq))
generic_handle_irq(irq); generic_handle_irq(irq);
irq_exit(); irq_exit();
} }
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
* To avoid inefficient and in some cases pathological re-checking of
* IRQ affinity, we have this variant that skips the affinity check.
*/
void __irq_entry do_IRQ_no_affinity(unsigned int irq)
{
irq_enter();
smtc_im_backstop(irq);
generic_handle_irq(irq);
irq_exit();
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
/* /*
* General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels * General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc * Copyright (C) 2005 Mips Technologies, Inc
*/ */
#include <linux/cpu.h> #include <linux/cpu.h>
......
/* /*
* General MIPS MT support routines, usable in AP/SP, SMVP, or SMTC kernels * General MIPS MT support routines, usable in AP/SP and SMVP.
* Copyright (C) 2005 Mips Technologies, Inc * Copyright (C) 2005 Mips Technologies, Inc
*/ */
...@@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl) ...@@ -57,9 +57,6 @@ void mips_mt_regdump(unsigned long mvpctl)
int tc; int tc;
unsigned long haltval; unsigned long haltval;
unsigned long tcstatval; unsigned long tcstatval;
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_soft_dump(void);
#endif /* CONFIG_MIPT_MT_SMTC */
local_irq_save(flags); local_irq_save(flags);
vpflags = dvpe(); vpflags = dvpe();
...@@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl) ...@@ -116,9 +113,6 @@ void mips_mt_regdump(unsigned long mvpctl)
if (!haltval) if (!haltval)
write_tc_c0_tchalt(0); write_tc_c0_tchalt(0);
} }
#ifdef CONFIG_MIPS_MT_SMTC
smtc_soft_dump();
#endif /* CONFIG_MIPT_MT_SMTC */
printk("===========================\n"); printk("===========================\n");
evpe(vpflags); evpe(vpflags);
local_irq_restore(flags); local_irq_restore(flags);
...@@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void) ...@@ -295,21 +289,11 @@ void mips_mt_set_cpuoptions(void)
void mt_cflush_lockdown(void) void mt_cflush_lockdown(void)
{ {
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_cflush_lockdown(void);
smtc_cflush_lockdown();
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */ /* FILL IN VSMP and AP/SP VERSIONS HERE */
} }
void mt_cflush_release(void) void mt_cflush_release(void)
{ {
#ifdef CONFIG_MIPS_MT_SMTC
void smtc_cflush_release(void);
smtc_cflush_release();
#endif /* CONFIG_MIPS_MT_SMTC */
/* FILL IN VSMP and AP/SP VERSIONS HERE */ /* FILL IN VSMP and AP/SP VERSIONS HERE */
} }
......
...@@ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp, ...@@ -140,13 +140,6 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
*/ */
childregs->cp0_status &= ~(ST0_CU2|ST0_CU1); childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC restores TCStatus after Status, and the CU bits
* are aliased there.
*/
childregs->cp0_tcstatus &= ~(ST0_CU2|ST0_CU1);
#endif
clear_tsk_thread_flag(p, TIF_USEDFPU); clear_tsk_thread_flag(p, TIF_USEDFPU);
#ifdef CONFIG_MIPS_MT_FPAFF #ifdef CONFIG_MIPS_MT_FPAFF
......
...@@ -87,18 +87,6 @@ ...@@ -87,18 +87,6 @@
PTR_ADDU t0, $28, _THREAD_SIZE - 32 PTR_ADDU t0, $28, _THREAD_SIZE - 32
set_saved_sp t0, t1, t2 set_saved_sp t0, t1, t2
#ifdef CONFIG_MIPS_MT_SMTC
/* Read-modify-writes of Status must be atomic on a VPE */
mfc0 t2, CP0_TCSTATUS
ori t1, t2, TCSTATUS_IXMT
mtc0 t1, CP0_TCSTATUS
andi t2, t2, TCSTATUS_IXMT
_ehb
DMT 8 # dmt t0
move t1,ra
jal mips_ihb
move ra,t1
#endif /* CONFIG_MIPS_MT_SMTC */
mfc0 t1, CP0_STATUS /* Do we really need this? */ mfc0 t1, CP0_STATUS /* Do we really need this? */
li a3, 0xff01 li a3, 0xff01
and t1, a3 and t1, a3
...@@ -107,18 +95,6 @@ ...@@ -107,18 +95,6 @@
and a2, a3 and a2, a3
or a2, t1 or a2, t1
mtc0 a2, CP0_STATUS mtc0 a2, CP0_STATUS
#ifdef CONFIG_MIPS_MT_SMTC
_ehb
andi t0, t0, VPECONTROL_TE
beqz t0, 1f
emt
1:
mfc0 t1, CP0_TCSTATUS
xori t1, t1, TCSTATUS_IXMT
or t1, t1, t2
mtc0 t1, CP0_TCSTATUS
_ehb
#endif /* CONFIG_MIPS_MT_SMTC */
move v0, a0 move v0, a0
jr ra jr ra
END(resume) END(resume)
...@@ -176,19 +152,10 @@ LEAF(_restore_msa) ...@@ -176,19 +152,10 @@ LEAF(_restore_msa)
#define FPU_DEFAULT 0x00000000 #define FPU_DEFAULT 0x00000000
LEAF(_init_fpu) LEAF(_init_fpu)
#ifdef CONFIG_MIPS_MT_SMTC
/* Rather than manipulate per-VPE Status, set per-TC bit in TCStatus */
mfc0 t0, CP0_TCSTATUS
/* Bit position is the same for Status, TCStatus */
li t1, ST0_CU1
or t0, t1
mtc0 t0, CP0_TCSTATUS
#else /* Normal MIPS CU1 enable */
mfc0 t0, CP0_STATUS mfc0 t0, CP0_STATUS
li t1, ST0_CU1 li t1, ST0_CU1
or t0, t1 or t0, t1
mtc0 t0, CP0_STATUS mtc0 t0, CP0_STATUS
#endif /* CONFIG_MIPS_MT_SMTC */
enable_fpu_hazard enable_fpu_hazard
li t1, FPU_DEFAULT li t1, FPU_DEFAULT
......
...@@ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id) ...@@ -36,7 +36,6 @@ static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
unsigned long flags; unsigned long flags;
int i; int i;
/* Ought not to be strictly necessary for SMTC builds */
local_irq_save(flags); local_irq_save(flags);
vpeflags = dvpe(); vpeflags = dvpe();
set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ); set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
......
...@@ -49,14 +49,11 @@ static void cmp_init_secondary(void) ...@@ -49,14 +49,11 @@ static void cmp_init_secondary(void)
/* Enable per-cpu interrupts: platform specific */ /* Enable per-cpu interrupts: platform specific */
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) #ifdef CONFIG_MIPS_MT_SMP
if (cpu_has_mipsmt) if (cpu_has_mipsmt)
c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) & c->vpe_id = (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
TCBIND_CURVPE; TCBIND_CURVPE;
#endif #endif
#ifdef CONFIG_MIPS_MT_SMTC
c->tc_id = (read_c0_tcbind() & TCBIND_CURTC) >> TCBIND_CURTC_SHIFT;
#endif
} }
static void cmp_smp_finish(void) static void cmp_smp_finish(void)
...@@ -135,10 +132,6 @@ void __init cmp_smp_setup(void) ...@@ -135,10 +132,6 @@ void __init cmp_smp_setup(void)
unsigned int mvpconf0 = read_c0_mvpconf0(); unsigned int mvpconf0 = read_c0_mvpconf0();
nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1; nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
#elif defined(CONFIG_MIPS_MT_SMTC)
unsigned int mvpconf0 = read_c0_mvpconf0();
nvpe = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
#endif #endif
smp_num_siblings = nvpe; smp_num_siblings = nvpe;
} }
......
...@@ -43,10 +43,6 @@ ...@@ -43,10 +43,6 @@
#include <asm/time.h> #include <asm/time.h>
#include <asm/setup.h> #include <asm/setup.h>
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#endif /* CONFIG_MIPS_MT_SMTC */
volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */ volatile cpumask_t cpu_callin_map; /* Bitmask of started secondaries */
int __cpu_number_map[NR_CPUS]; /* Map physical to logical */ int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
...@@ -102,12 +98,6 @@ asmlinkage void start_secondary(void) ...@@ -102,12 +98,6 @@ asmlinkage void start_secondary(void)
{ {
unsigned int cpu; unsigned int cpu;
#ifdef CONFIG_MIPS_MT_SMTC
/* Only do cpu_probe for first TC of CPU */
if ((read_c0_tcbind() & TCBIND_CURTC) != 0)
__cpu_name[smp_processor_id()] = __cpu_name[0];
else
#endif /* CONFIG_MIPS_MT_SMTC */
cpu_probe(); cpu_probe();
cpu_report(); cpu_report();
per_cpu_trap_init(false); per_cpu_trap_init(false);
...@@ -238,13 +228,10 @@ static void flush_tlb_mm_ipi(void *mm) ...@@ -238,13 +228,10 @@ static void flush_tlb_mm_ipi(void *mm)
* o collapses to normal function call on UP kernels * o collapses to normal function call on UP kernels
* o collapses to normal function call on systems with a single shared * o collapses to normal function call on systems with a single shared
* primary cache. * primary cache.
* o CONFIG_MIPS_MT_SMTC currently implies there is only one physical core.
*/ */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info) static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{ {
#ifndef CONFIG_MIPS_MT_SMTC
smp_call_function(func, info, 1); smp_call_function(func, info, 1);
#endif
} }
static inline void smp_on_each_tlb(void (*func) (void *info), void *info) static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
......
/*
* Assembly Language Functions for MIPS MT SMTC support
*/
/*
* This file should be built into the kernel only if CONFIG_MIPS_MT_SMTC is set. */
#include <asm/regdef.h>
#include <asm/asmmacro.h>
#include <asm/stackframe.h>
#include <asm/irqflags.h>
/*
* "Software Interrupt" linkage.
*
* This is invoked when an "Interrupt" is sent from one TC to another,
 * where the TC to be interrupted is halted, has its Restart address
 * and Status values saved by the "remote control" thread, then modified
 * to cause execution to begin here, in kernel mode. This code then
* disguises the TC state as that of an exception and transfers
* control to the general exception or vectored interrupt handler.
*/
.set noreorder
/*
The __smtc_ipi_vector would use k0 and k1 as temporaries and
1) Set EXL (this is per-VPE, so this can't be done by proxy!)
2) Restore the K/CU and IXMT bits to the pre "exception" state
(EXL means no interrupts and access to the kernel map).
3) Set EPC to be the saved value of TCRestart.
4) Jump to the exception handler entry point passed by the sender.
CAN WE PROVE THAT WE WON'T DO THIS IF INTS DISABLED??
*/
/*
* Reviled and slandered vision: Set EXL and restore K/CU/IXMT
* state of pre-halt thread, then save everything and call
 * through some function pointer to imaginary_exception, which
* will parse a register value or memory message queue to
* deliver things like interprocessor interrupts. On return
* from that function, jump to the global ret_from_irq code
* to invoke the scheduler and return as appropriate.
*/
#define PT_PADSLOT4 (PT_R0-8)
#define PT_PADSLOT5 (PT_R0-4)
.text
.align 5
FEXPORT(__smtc_ipi_vector)
#ifdef CONFIG_CPU_MICROMIPS
nop
#endif
.set noat
/* Disable thread scheduling to make Status update atomic */
DMT 27 # dmt k1
_ehb
/* Set EXL */
mfc0 k0,CP0_STATUS
ori k0,k0,ST0_EXL
mtc0 k0,CP0_STATUS
_ehb
/* Thread scheduling now inhibited by EXL. Restore TE state. */
andi k1,k1,VPECONTROL_TE
beqz k1,1f
emt
1:
/*
* The IPI sender has put some information on the anticipated
* kernel stack frame. If we were in user mode, this will be
* built above the saved kernel SP. If we were already in the
* kernel, it will be built above the current CPU SP.
*
* Were we in kernel mode, as indicated by CU0?
*/
sll k1,k0,3
.set noreorder
bltz k1,2f
move k1,sp
.set reorder
/*
* If previously in user mode, set CU0 and use kernel stack.
*/
li k1,ST0_CU0
or k1,k1,k0
mtc0 k1,CP0_STATUS
_ehb
get_saved_sp
/* Interrupting TC will have pre-set values in slots in the new frame */
2: subu k1,k1,PT_SIZE
/* Load TCStatus Value */
lw k0,PT_TCSTATUS(k1)
/* Write it to TCStatus to restore CU/KSU/IXMT state */
mtc0 k0,$2,1
_ehb
lw k0,PT_EPC(k1)
mtc0 k0,CP0_EPC
/* Save all will redundantly recompute the SP, but use it for now */
SAVE_ALL
CLI
TRACE_IRQS_OFF
	/* Function to be invoked, passed in stack pad slot 5 */
lw t0,PT_PADSLOT5(sp)
/* Argument from sender passed in stack pad slot 4 */
lw a0,PT_PADSLOT4(sp)
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
jr t0
/*
* Called from idle loop to provoke processing of queued IPIs
* First IPI message in queue passed as argument.
*/
LEAF(self_ipi)
/* Before anything else, block interrupts */
mfc0 t0,CP0_TCSTATUS
ori t1,t0,TCSTATUS_IXMT
mtc0 t1,CP0_TCSTATUS
_ehb
/* We know we're in kernel mode, so prepare stack frame */
subu t1,sp,PT_SIZE
sw ra,PT_EPC(t1)
sw a0,PT_PADSLOT4(t1)
la t2,ipi_decode
sw t2,PT_PADSLOT5(t1)
/* Save pre-disable value of TCStatus */
sw t0,PT_TCSTATUS(t1)
j __smtc_ipi_vector
nop
END(self_ipi)
/*
* /proc hooks for SMTC kernel
* Copyright (C) 2005 Mips Technologies, Inc
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <asm/smtc_proc.h>
/*
* /proc diagnostic and statistics hooks
*/
/*
* Statistics gathered
*/
unsigned long selfipis[NR_CPUS];
struct smtc_cpu_proc smtc_cpu_stats[NR_CPUS];
atomic_t smtc_fpu_recoveries;
static int smtc_proc_show(struct seq_file *m, void *v)
{
int i;
extern unsigned long ebase;
seq_printf(m, "SMTC Status Word: 0x%08x\n", smtc_status);
seq_printf(m, "Config7: 0x%08x\n", read_c0_config7());
seq_printf(m, "EBASE: 0x%08lx\n", ebase);
seq_printf(m, "Counter Interrupts taken per CPU (TC)\n");
for (i=0; i < NR_CPUS; i++)
seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].timerints);
seq_printf(m, "Self-IPIs by CPU:\n");
for(i = 0; i < NR_CPUS; i++)
seq_printf(m, "%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
seq_printf(m, "%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
return 0;
}
static int smtc_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, smtc_proc_show, NULL);
}
static const struct file_operations smtc_proc_fops = {
.open = smtc_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
void init_smtc_stats(void)
{
int i;
for (i=0; i<NR_CPUS; i++) {
smtc_cpu_stats[i].timerints = 0;
smtc_cpu_stats[i].selfipis = 0;
}
atomic_set(&smtc_fpu_recoveries, 0);
proc_create("smtc", 0444, NULL, &smtc_proc_fops);
}
static int proc_cpuinfo_chain_call(struct notifier_block *nfb,
unsigned long action_unused, void *data)
{
struct proc_cpuinfo_notifier_args *pcn = data;
struct seq_file *m = pcn->m;
unsigned long n = pcn->n;
if (!cpu_has_mipsmt)
return NOTIFY_OK;
seq_printf(m, "VPE\t\t\t: %d\n", cpu_data[n].vpe_id);
seq_printf(m, "TC\t\t\t: %d\n", cpu_data[n].tc_id);
return NOTIFY_OK;
}
static int __init proc_cpuinfo_notifier_init(void)
{
return proc_cpuinfo_notifier(proc_cpuinfo_chain_call, 0);
}
subsys_initcall(proc_cpuinfo_notifier_init);
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*
* Copyright (C) 2004 Mips Technologies, Inc
* Copyright (C) 2008 Kevin D. Kissell
*/
#include <linux/clockchips.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/module.h>
#include <linux/ftrace.h>
#include <linux/slab.h>
#include <asm/cpu.h>
#include <asm/processor.h>
#include <linux/atomic.h>
#include <asm/hardirq.h>
#include <asm/hazards.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/mmu_context.h>
#include <asm/mipsregs.h>
#include <asm/cacheflush.h>
#include <asm/time.h>
#include <asm/addrspace.h>
#include <asm/smtc.h>
#include <asm/smtc_proc.h>
#include <asm/setup.h>
/*
* SMTC Kernel needs to manipulate low-level CPU interrupt mask
* in do_IRQ. These are passed in setup_irq_smtc() and stored
* in this table.
*/
unsigned long irq_hwmask[NR_IRQS];
#define LOCK_MT_PRA() \
local_irq_save(flags); \
mtflags = dmt()
#define UNLOCK_MT_PRA() \
emt(mtflags); \
local_irq_restore(flags)
#define LOCK_CORE_PRA() \
local_irq_save(flags); \
mtflags = dvpe()
#define UNLOCK_CORE_PRA() \
evpe(mtflags); \
local_irq_restore(flags)
/*
* Data structures purely associated with SMTC parallelism
*/
/*
* Table for tracking ASIDs whose lifetime is prolonged.
*/
asiduse smtc_live_asid[MAX_SMTC_TLBS][MAX_SMTC_ASIDS];
/*
* Number of InterProcessor Interrupt (IPI) message buffers to allocate
*/
#define IPIBUF_PER_CPU 4
struct smtc_ipi_q IPIQ[NR_CPUS];
static struct smtc_ipi_q freeIPIq;
/*
* Number of FPU contexts for each VPE
*/
static int smtc_nconf1[MAX_SMTC_VPES];
/* Forward declarations */
void ipi_decode(struct smtc_ipi *);
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi);
static void setup_cross_vpe_interrupts(unsigned int nvpe);
void init_smtc_stats(void);
/* Global SMTC Status */
unsigned int smtc_status;
/* Boot command line configuration overrides */
static int vpe0limit;
static int ipibuffers;
static int nostlb;
static int asidmask;
unsigned long smtc_asid_mask = 0xff;
static int __init vpe0tcs(char *str)
{
get_option(&str, &vpe0limit);
return 1;
}
static int __init ipibufs(char *str)
{
get_option(&str, &ipibuffers);
return 1;
}
static int __init stlb_disable(char *s)
{
nostlb = 1;
return 1;
}
static int __init asidmask_set(char *str)
{
get_option(&str, &asidmask);
switch (asidmask) {
case 0x1:
case 0x3:
case 0x7:
case 0xf:
case 0x1f:
case 0x3f:
case 0x7f:
case 0xff:
smtc_asid_mask = (unsigned long)asidmask;
break;
default:
printk("ILLEGAL ASID mask 0x%x from command line\n", asidmask);
}
return 1;
}
__setup("vpe0tcs=", vpe0tcs);
__setup("ipibufs=", ipibufs);
__setup("nostlb", stlb_disable);
__setup("asidmask=", asidmask_set);
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
static int hang_trig;
static int __init hangtrig_enable(char *s)
{
hang_trig = 1;
return 1;
}
__setup("hangtrig", hangtrig_enable);
#define DEFAULT_BLOCKED_IPI_LIMIT 32
static int timerq_limit = DEFAULT_BLOCKED_IPI_LIMIT;
static int __init tintq(char *str)
{
get_option(&str, &timerq_limit);
return 1;
}
__setup("tintq=", tintq);
static int imstuckcount[MAX_SMTC_VPES][8];
/* vpemask represents IM/IE bits of per-VPE Status registers, low-to-high */
static int vpemask[MAX_SMTC_VPES][8] = {
{0, 0, 1, 0, 0, 0, 0, 1},
{0, 0, 0, 0, 0, 0, 0, 1}
};
int tcnoprog[NR_CPUS];
static atomic_t idle_hook_initialized = ATOMIC_INIT(0);
static int clock_hang_reported[NR_CPUS];
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/*
* Configure shared TLB - VPC configuration bit must be set by caller
*/
static void smtc_configure_tlb(void)
{
int i, tlbsiz, vpes;
unsigned long mvpconf0;
unsigned long config1val;
/* Set up ASID preservation table */
for (vpes=0; vpes<MAX_SMTC_TLBS; vpes++) {
for(i = 0; i < MAX_SMTC_ASIDS; i++) {
smtc_live_asid[vpes][i] = 0;
}
}
mvpconf0 = read_c0_mvpconf0();
if ((vpes = ((mvpconf0 & MVPCONF0_PVPE)
>> MVPCONF0_PVPE_SHIFT) + 1) > 1) {
/* If we have multiple VPEs, try to share the TLB */
if ((mvpconf0 & MVPCONF0_TLBS) && !nostlb) {
/*
* If TLB sizing is programmable, shared TLB
* size is the total available complement.
* Otherwise, we have to take the sum of all
* static VPE TLB entries.
*/
if ((tlbsiz = ((mvpconf0 & MVPCONF0_PTLBE)
>> MVPCONF0_PTLBE_SHIFT)) == 0) {
/*
* If there's more than one VPE, there had better
* be more than one TC, because we need one to bind
* to each VPE in turn to be able to read
* its configuration state!
*/
settc(1);
/* Stop the TC from doing anything foolish */
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
/* No need to un-Halt - that happens later anyway */
for (i=0; i < vpes; i++) {
write_tc_c0_tcbind(i);
/*
* To be 100% sure we're really getting the right
* information, we exit the configuration state
* and do an IHB after each rebinding.
*/
write_c0_mvpcontrol(
read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
mips_ihb();
/*
* Only count if the MMU Type indicated is TLB
*/
if (((read_vpe_c0_config() & MIPS_CONF_MT) >> 7) == 1) {
config1val = read_vpe_c0_config1();
tlbsiz += ((config1val >> 25) & 0x3f) + 1;
}
/* Put core back in configuration state */
write_c0_mvpcontrol(
read_c0_mvpcontrol() | MVPCONTROL_VPC );
mips_ihb();
}
}
write_c0_mvpcontrol(read_c0_mvpcontrol() | MVPCONTROL_STLB);
ehb();
/*
* Setup kernel data structures to use software total,
* rather than read the per-VPE Config1 value. The values
* for "CPU 0" gets copied to all the other CPUs as part
* of their initialization in smtc_cpu_setup().
*/
/* MIPS32 limits TLB indices to 64 */
if (tlbsiz > 64)
tlbsiz = 64;
cpu_data[0].tlbsize = current_cpu_data.tlbsize = tlbsiz;
smtc_status |= SMTC_TLB_SHARED;
local_flush_tlb_all();
printk("TLB of %d entry pairs shared by %d VPEs\n",
tlbsiz, vpes);
} else {
printk("WARNING: TLB Not Sharable on SMTC Boot!\n");
}
}
}
/*
* Incrementally build the CPU map out of constituent MIPS MT cores,
 * using the specified available VPEs and TCs. Platform code needs
* to ensure that each MIPS MT core invokes this routine on reset,
* one at a time(!).
*
* This version of the build_cpu_map and prepare_cpus routines assumes
* that *all* TCs of a MIPS MT core will be used for Linux, and that
* they will be spread across *all* available VPEs (to minimise the
* loss of efficiency due to exception service serialization).
* An improved version would pick up configuration information and
* possibly leave some TCs/VPEs as "slave" processors.
*
* Use c0_MVPConf0 to find out how many TCs are available, setting up
* cpu_possible_mask and the logical/physical mappings.
*/
int __init smtc_build_cpu_map(int start_cpu_slot)
{
int i, ntcs;
/*
* The CPU map isn't actually used for anything at this point,
* so it's not clear what else we should do apart from set
* everything up so that "logical" = "physical".
*/
ntcs = ((read_c0_mvpconf0() & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
for (i=start_cpu_slot; i<NR_CPUS && i<ntcs; i++) {
set_cpu_possible(i, true);
__cpu_number_map[i] = i;
__cpu_logical_map[i] = i;
}
#ifdef CONFIG_MIPS_MT_FPAFF
/* Initialize map of CPUs with FPUs */
cpus_clear(mt_fpu_cpumask);
#endif
/* One of those TC's is the one booting, and not a secondary... */
printk("%i available secondary CPU TC(s)\n", i - 1);
return i;
}
/*
* Common setup before any secondaries are started
* Make sure all CPUs are in a sensible state before we boot any of the
* secondaries.
*
* For MIPS MT "SMTC" operation, we set up all TCs, spread as evenly
* as possible across the available VPEs.
*/
static void smtc_tc_setup(int vpe, int tc, int cpu)
{
static int cp1contexts[MAX_SMTC_VPES];
/*
* Make a local copy of the available FPU contexts in order
* to keep track of TCs that can have one.
*/
if (tc == 1)
{
/*
* FIXME: Multi-core SMTC hasn't been tested and the
* maximum number of VPEs may change.
*/
cp1contexts[0] = smtc_nconf1[0] - 1;
cp1contexts[1] = smtc_nconf1[1];
}
settc(tc);
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
write_tc_c0_tcstatus((read_tc_c0_tcstatus()
& ~(TCSTATUS_TKSU | TCSTATUS_DA | TCSTATUS_IXMT))
| TCSTATUS_A);
/*
* TCContext gets an offset from the base of the IPIQ array
* to be used in low-level code to detect the presence of
* an active IPI queue.
*/
write_tc_c0_tccontext((sizeof(struct smtc_ipi_q) * cpu) << 16);
/* Bind TC to VPE. */
write_tc_c0_tcbind(vpe);
/* In general, all TCs should have the same cpu_data indications. */
memcpy(&cpu_data[cpu], &cpu_data[0], sizeof(struct cpuinfo_mips));
/* Check to see if there is a FPU context available for this TC. */
if (!cp1contexts[vpe])
cpu_data[cpu].options &= ~MIPS_CPU_FPU;
else
cp1contexts[vpe]--;
/* Store the TC and VPE into the cpu_data structure. */
cpu_data[cpu].vpe_id = vpe;
cpu_data[cpu].tc_id = tc;
/* FIXME: Multi-core SMTC hasn't been tested, but be prepared. */
cpu_data[cpu].core = (read_vpe_c0_ebase() >> 1) & 0xff;
}
/*
* Tweak to get Count registers synced as closely as possible. The
* value seems good for 34K-class cores.
*/
#define CP0_SKEW 8
void smtc_prepare_cpus(int cpus)
{
int i, vpe, tc, ntc, nvpe, tcpervpe[NR_CPUS], slop, cpu;
unsigned long flags;
unsigned long val;
int nipi;
struct smtc_ipi *pipi;
/* disable interrupts so we can disable MT */
local_irq_save(flags);
/* disable MT so we can configure */
dvpe();
dmt();
spin_lock_init(&freeIPIq.lock);
/*
* We probably don't have as many VPEs as we do SMP "CPUs",
* but it's possible - and in any case we'll never use more!
*/
for (i=0; i<NR_CPUS; i++) {
IPIQ[i].head = IPIQ[i].tail = NULL;
spin_lock_init(&IPIQ[i].lock);
IPIQ[i].depth = 0;
IPIQ[i].resched_flag = 0; /* No reschedules queued initially */
}
/* cpu_data index starts at zero */
cpu = 0;
cpu_data[cpu].vpe_id = 0;
cpu_data[cpu].tc_id = 0;
cpu_data[cpu].core = (read_c0_ebase() >> 1) & 0xff;
cpu++;
/* Report on boot-time options */
mips_mt_set_cpuoptions();
if (vpelimit > 0)
printk("Limit of %d VPEs set\n", vpelimit);
if (tclimit > 0)
printk("Limit of %d TCs set\n", tclimit);
if (nostlb) {
printk("Shared TLB Use Inhibited - UNSAFE for Multi-VPE Operation\n");
}
if (asidmask)
printk("ASID mask value override to 0x%x\n", asidmask);
/* Temporary */
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
if (hang_trig)
printk("Logic Analyser Trigger on suspected TC hang\n");
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
/* Put MVPE's into 'configuration state' */
write_c0_mvpcontrol( read_c0_mvpcontrol() | MVPCONTROL_VPC );
val = read_c0_mvpconf0();
nvpe = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
if (vpelimit > 0 && nvpe > vpelimit)
nvpe = vpelimit;
ntc = ((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
if (ntc > NR_CPUS)
ntc = NR_CPUS;
if (tclimit > 0 && ntc > tclimit)
ntc = tclimit;
slop = ntc % nvpe;
for (i = 0; i < nvpe; i++) {
tcpervpe[i] = ntc / nvpe;
if (slop) {
if((slop - i) > 0) tcpervpe[i]++;
}
}
/* Handle command line override for VPE0 */
if (vpe0limit > ntc) vpe0limit = ntc;
if (vpe0limit > 0) {
int slopslop;
if (vpe0limit < tcpervpe[0]) {
/* Reducing TC count - distribute to others */
slop = tcpervpe[0] - vpe0limit;
slopslop = slop % (nvpe - 1);
tcpervpe[0] = vpe0limit;
for (i = 1; i < nvpe; i++) {
tcpervpe[i] += slop / (nvpe - 1);
if(slopslop && ((slopslop - (i - 1) > 0)))
tcpervpe[i]++;
}
} else if (vpe0limit > tcpervpe[0]) {
/* Increasing TC count - steal from others */
slop = vpe0limit - tcpervpe[0];
slopslop = slop % (nvpe - 1);
tcpervpe[0] = vpe0limit;
for (i = 1; i < nvpe; i++) {
tcpervpe[i] -= slop / (nvpe - 1);
if(slopslop && ((slopslop - (i - 1) > 0)))
tcpervpe[i]--;
}
}
}
/* Set up shared TLB */
smtc_configure_tlb();
for (tc = 0, vpe = 0 ; (vpe < nvpe) && (tc < ntc) ; vpe++) {
/* Get number of CP1 contexts for each VPE. */
if (tc == 0)
{
/*
* Do not call settc() for TC0 or the FPU context
* value will be incorrect. Besides, we know that
* we are TC0 anyway.
*/
smtc_nconf1[0] = ((read_vpe_c0_vpeconf1() &
VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
if (nvpe == 2)
{
settc(1);
smtc_nconf1[1] = ((read_vpe_c0_vpeconf1() &
VPECONF1_NCP1) >> VPECONF1_NCP1_SHIFT);
settc(0);
}
}
if (tcpervpe[vpe] == 0)
continue;
if (vpe != 0)
printk(", ");
printk("VPE %d: TC", vpe);
for (i = 0; i < tcpervpe[vpe]; i++) {
/*
* TC 0 is bound to VPE 0 at reset,
* and is presumably executing this
* code. Leave it alone!
*/
if (tc != 0) {
smtc_tc_setup(vpe, tc, cpu);
if (vpe != 0) {
/*
* Set MVP bit (possibly again). Do it
* here to catch CPUs that have no TCs
* bound to the VPE at reset. In that
* case, a TC must be bound to the VPE
* before we can set VPEControl[MVP]
*/
write_vpe_c0_vpeconf0(
read_vpe_c0_vpeconf0() |
VPECONF0_MVP);
}
cpu++;
}
printk(" %d", tc);
tc++;
}
if (vpe != 0) {
/*
* Allow this VPE to control others.
*/
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() |
VPECONF0_MVP);
/*
* Clear any stale software interrupts from VPE's Cause
*/
write_vpe_c0_cause(0);
/*
* Clear ERL/EXL of VPEs other than 0
* and set restricted interrupt enable/mask.
*/
write_vpe_c0_status((read_vpe_c0_status()
& ~(ST0_BEV | ST0_ERL | ST0_EXL | ST0_IM))
| (STATUSF_IP0 | STATUSF_IP1 | STATUSF_IP7
| ST0_IE));
/*
* set config to be the same as vpe0,
* particularly kseg0 coherency alg
*/
write_vpe_c0_config(read_c0_config());
/* Clear any pending timer interrupt */
write_vpe_c0_compare(0);
/* Propagate Config7 */
write_vpe_c0_config7(read_c0_config7());
write_vpe_c0_count(read_c0_count() + CP0_SKEW);
ehb();
}
/* enable multi-threading within VPE */
write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() | VPECONTROL_TE);
/* enable the VPE */
write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
}
/*
* Pull any physically present but unused TCs out of circulation.
*/
while (tc < (((val & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1)) {
set_cpu_possible(tc, false);
set_cpu_present(tc, false);
tc++;
}
/* release config state */
write_c0_mvpcontrol( read_c0_mvpcontrol() & ~ MVPCONTROL_VPC );
printk("\n");
/* Set up coprocessor affinity CPU mask(s) */
#ifdef CONFIG_MIPS_MT_FPAFF
for (tc = 0; tc < ntc; tc++) {
if (cpu_data[tc].options & MIPS_CPU_FPU)
cpu_set(tc, mt_fpu_cpumask);
}
#endif
/* set up ipi interrupts... */
/* If we have multiple VPEs running, set up the cross-VPE interrupt */
setup_cross_vpe_interrupts(nvpe);
/* Set up queue of free IPI "messages". */
nipi = NR_CPUS * IPIBUF_PER_CPU;
if (ipibuffers > 0)
nipi = ipibuffers;
pipi = kmalloc(nipi *sizeof(struct smtc_ipi), GFP_KERNEL);
if (pipi == NULL)
panic("kmalloc of IPI message buffers failed");
else
printk("IPI buffer pool of %d buffers\n", nipi);
for (i = 0; i < nipi; i++) {
smtc_ipi_nq(&freeIPIq, pipi);
pipi++;
}
/* Arm multithreading and enable other VPEs - but all TCs are Halted */
emt(EMT_ENABLE);
evpe(EVPE_ENABLE);
local_irq_restore(flags);
/* Initialize SMTC /proc statistics/diagnostics */
init_smtc_stats();
}
/*
* Setup the PC, SP, and GP of a secondary processor and start it
* running!
* smp_bootstrap is the place to resume from
* __KSTK_TOS(idle) is apparently the stack pointer
* (unsigned long)idle->thread_info the gp
*
*/
void smtc_boot_secondary(int cpu, struct task_struct *idle)
{
extern u32 kernelsp[NR_CPUS];
unsigned long flags;
int mtflags;
LOCK_MT_PRA();
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
dvpe();
}
settc(cpu_data[cpu].tc_id);
/* pc */
write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
/* stack pointer */
kernelsp[cpu] = __KSTK_TOS(idle);
write_tc_gpr_sp(__KSTK_TOS(idle));
/* global pointer */
write_tc_gpr_gp((unsigned long)task_thread_info(idle));
smtc_status |= SMTC_MTC_ACTIVE;
write_tc_c0_tchalt(0);
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
evpe(EVPE_ENABLE);
}
UNLOCK_MT_PRA();
}
void smtc_init_secondary(void)
{
}
void smtc_smp_finish(void)
{
int cpu = smp_processor_id();
/*
* Lowest-numbered CPU per VPE starts a clock tick.
* Like per_cpu_trap_init() hack, this assumes that
	 * SMTC init code assigns TCs consecutively and
* in ascending order across available VPEs.
*/
if (cpu > 0 && (cpu_data[cpu].vpe_id != cpu_data[cpu - 1].vpe_id))
write_c0_compare(read_c0_count() + mips_hpt_frequency/HZ);
local_irq_enable();
printk("TC %d going on-line as CPU %d\n",
cpu_data[smp_processor_id()].tc_id, smp_processor_id());
}
void smtc_cpus_done(void)
{
}
/*
* Support for SMTC-optimized driver IRQ registration
*/
/*
* SMTC Kernel needs to manipulate low-level CPU interrupt mask
* in do_IRQ. These are passed in setup_irq_smtc() and stored
* in this table.
*/
int setup_irq_smtc(unsigned int irq, struct irqaction * new,
unsigned long hwmask)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
unsigned int vpe = current_cpu_data.vpe_id;
vpemask[vpe][irq - MIPS_CPU_IRQ_BASE] = 1;
#endif
irq_hwmask[irq] = hwmask;
return setup_irq(irq, new);
}
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
* Support for IRQ affinity to TCs
*/
void smtc_set_irq_affinity(unsigned int irq, cpumask_t affinity)
{
/*
* If a "fast path" cache of quickly decodable affinity state
* is maintained, this is where it gets done, on a call up
* from the platform affinity code.
*/
}
void smtc_forward_irq(struct irq_data *d)
{
unsigned int irq = d->irq;
int target;
/*
* OK wise guy, now figure out how to get the IRQ
* to be serviced on an authorized "CPU".
*
* Ideally, to handle the situation where an IRQ has multiple
* eligible CPUS, we would maintain state per IRQ that would
* allow a fair distribution of service requests. Since the
* expected use model is any-or-only-one, for simplicity
* and efficiency, we just pick the easiest one to find.
*/
target = cpumask_first(d->affinity);
/*
* We depend on the platform code to have correctly processed
* IRQ affinity change requests to ensure that the IRQ affinity
* mask has been purged of bits corresponding to nonexistent and
* offline "CPUs", and to TCs bound to VPEs other than the VPE
* connected to the physical interrupt input for the interrupt
* in question. Otherwise we have a nasty problem with interrupt
* mask management. This is best handled in non-performance-critical
* platform IRQ affinity setting code, to minimize interrupt-time
* checks.
*/
/* If no one is eligible, service locally */
if (target >= NR_CPUS)
do_IRQ_no_affinity(irq);
else
smtc_send_ipi(target, IRQ_AFFINITY_IPI, irq);
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
/*
* IPI model for SMTC is tricky, because interrupts aren't TC-specific.
* Within a VPE one TC can interrupt another by different approaches.
* The easiest to get right would probably be to make all TCs except
* the target IXMT and set a software interrupt, but an IXMT-based
* scheme requires that a handler must run before a new IPI could
* be sent, which would break the "broadcast" loops in MIPS MT.
* A more gonzo approach within a VPE is to halt the TC, extract
* its Restart, Status, and a couple of GPRs, and program the Restart
* address to emulate an interrupt.
*
* Within a VPE, one can be confident that the target TC isn't in
* a critical EXL state when halted, since the write to the Halt
* register could not have issued on the writing thread if the
* halting thread had EXL set. So k0 and k1 of the target TC
* can be used by the injection code. Across VPEs, one can't
* be certain that the target TC isn't in a critical exception
* state. So we try a two-step process of sending a software
* interrupt to the target VPE, which either handles the event
* itself (if it was the target) or injects the event within
* the VPE.
*/
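/*
 * A simplified, standalone sketch (not kernel code) of the delivery
 * decision described above and implemented in smtc_send_ipi() below.
 * It ignores the r4k_wait_irqoff special case; the demo_* names are
 * illustrative assumptions only.
 */
#include <stdbool.h>
#include <stdio.h>

enum demo_ipi_path {
	DEMO_IPI_CROSS_VPE_SWINT,	/* queue, then raise SW1 on the other VPE      */
	DEMO_IPI_QUEUE_FOR_TARGET,	/* target is IXMT: drained on its irq restore  */
	DEMO_IPI_POST_DIRECT,		/* same VPE, interruptible: halt TC and inject */
};

static enum demo_ipi_path demo_pick_ipi_path(bool same_vpe, bool target_ixmt)
{
	if (!same_vpe)
		return DEMO_IPI_CROSS_VPE_SWINT;
	if (target_ixmt)
		return DEMO_IPI_QUEUE_FOR_TARGET;
	return DEMO_IPI_POST_DIRECT;
}

int main(void)
{
	printf("cross-VPE: %d, same VPE + IXMT: %d, same VPE: %d\n",
	       demo_pick_ipi_path(false, false),
	       demo_pick_ipi_path(true, true),
	       demo_pick_ipi_path(true, false));
	return 0;
}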
static void smtc_ipi_qdump(void)
{
int i;
struct smtc_ipi *temp;
for (i = 0; i < NR_CPUS ;i++) {
pr_info("IPIQ[%d]: head = 0x%x, tail = 0x%x, depth = %d\n",
i, (unsigned)IPIQ[i].head, (unsigned)IPIQ[i].tail,
IPIQ[i].depth);
temp = IPIQ[i].head;
while (temp != IPIQ[i].tail) {
pr_debug("%d %d %d: ", temp->type, temp->dest,
(int)temp->arg);
#ifdef SMTC_IPI_DEBUG
pr_debug("%u %lu\n", temp->sender, temp->stamp);
#else
pr_debug("\n");
#endif
temp = temp->flink;
}
}
}
/*
* The standard atomic.h primitives don't quite do what we want
* here: We need an atomic add-and-return-previous-value (which
* could be done with atomic_add_return and a decrement) and an
* atomic set/zero-and-return-previous-value (which can't really
* be done with the atomic.h primitives). And since this is
* MIPS MT, we can assume that we have LL/SC.
*/
static inline int atomic_postincrement(atomic_t *v)
{
unsigned long result;
unsigned long temp;
__asm__ __volatile__(
"1: ll %0, %2 \n"
" addu %1, %0, 1 \n"
" sc %1, %2 \n"
" beqz %1, 1b \n"
__WEAK_LLSC_MB
: "=&r" (result), "=&r" (temp), "=m" (v->counter)
: "m" (v->counter)
: "memory");
return result;
}
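/*
 * For comparison only: on a hosted compiler the same add-and-return-
 * previous-value operation can be sketched with the GCC/Clang __atomic
 * builtins. This is an illustrative stand-in, not a substitute for the
 * LL/SC sequence above; the demo_* names are assumptions.
 */
#include <stdio.h>

static inline long demo_postincrement(long *v)
{
	/* Returns the value *v held before the increment. */
	return __atomic_fetch_add(v, 1, __ATOMIC_SEQ_CST);
}

int main(void)
{
	long counter = 41;
	long old = demo_postincrement(&counter);

	printf("old = %ld, new = %ld\n", old, counter);	/* old = 41, new = 42 */
	return 0;
}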
void smtc_send_ipi(int cpu, int type, unsigned int action)
{
int tcstatus;
struct smtc_ipi *pipi;
unsigned long flags;
int mtflags;
unsigned long tcrestart;
int set_resched_flag = (type == LINUX_SMP_IPI &&
action == SMP_RESCHEDULE_YOURSELF);
if (cpu == smp_processor_id()) {
printk("Cannot Send IPI to self!\n");
return;
}
if (set_resched_flag && IPIQ[cpu].resched_flag != 0)
return; /* There is a reschedule queued already */
/* Set up a descriptor, to be delivered either promptly or queued */
pipi = smtc_ipi_dq(&freeIPIq);
if (pipi == NULL) {
bust_spinlocks(1);
mips_mt_regdump(dvpe());
panic("IPI Msg. Buffers Depleted");
}
pipi->type = type;
pipi->arg = (void *)action;
pipi->dest = cpu;
if (cpu_data[cpu].vpe_id != cpu_data[smp_processor_id()].vpe_id) {
/* If not on same VPE, enqueue and send cross-VPE interrupt */
IPIQ[cpu].resched_flag |= set_resched_flag;
smtc_ipi_nq(&IPIQ[cpu], pipi);
LOCK_CORE_PRA();
settc(cpu_data[cpu].tc_id);
write_vpe_c0_cause(read_vpe_c0_cause() | C_SW1);
UNLOCK_CORE_PRA();
} else {
/*
* Not sufficient to do a LOCK_MT_PRA (dmt) here,
* since ASID shootdown on the other VPE may
* collide with this operation.
*/
LOCK_CORE_PRA();
settc(cpu_data[cpu].tc_id);
/* Halt the targeted TC */
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
/*
* Inspect TCStatus - if IXMT is set, we have to queue
* a message. Otherwise, we set up the "interrupt"
* of the other TC
*/
tcstatus = read_tc_c0_tcstatus();
if ((tcstatus & TCSTATUS_IXMT) != 0) {
/*
			 * If we're in the irq-off version of the wait
* loop, we need to force exit from the wait and
* do a direct post of the IPI.
*/
if (cpu_wait == r4k_wait_irqoff) {
tcrestart = read_tc_c0_tcrestart();
if (address_is_in_r4k_wait_irqoff(tcrestart)) {
write_tc_c0_tcrestart(__pastwait);
tcstatus &= ~TCSTATUS_IXMT;
write_tc_c0_tcstatus(tcstatus);
goto postdirect;
}
}
/*
* Otherwise we queue the message for the target TC
* to pick up when he does a local_irq_restore()
*/
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
IPIQ[cpu].resched_flag |= set_resched_flag;
smtc_ipi_nq(&IPIQ[cpu], pipi);
} else {
postdirect:
post_direct_ipi(cpu, pipi);
write_tc_c0_tchalt(0);
UNLOCK_CORE_PRA();
}
}
}
/*
* Send IPI message to Halted TC, TargTC/TargVPE already having been set
*/
static void post_direct_ipi(int cpu, struct smtc_ipi *pipi)
{
struct pt_regs *kstack;
unsigned long tcstatus;
unsigned long tcrestart;
extern u32 kernelsp[NR_CPUS];
extern void __smtc_ipi_vector(void);
//printk("%s: on %d for %d\n", __func__, smp_processor_id(), cpu);
/* Extract Status, EPC from halted TC */
tcstatus = read_tc_c0_tcstatus();
tcrestart = read_tc_c0_tcrestart();
/* If TCRestart indicates a WAIT instruction, advance the PC */
if ((tcrestart & 0x80000000)
&& ((*(unsigned int *)tcrestart & 0xfe00003f) == 0x42000020)) {
tcrestart += 4;
}
/*
* Save on TC's future kernel stack
*
* CU bit of Status is indicator that TC was
* already running on a kernel stack...
*/
if (tcstatus & ST0_CU0) {
/* Note that this "- 1" is pointer arithmetic */
kstack = ((struct pt_regs *)read_tc_gpr_sp()) - 1;
} else {
kstack = ((struct pt_regs *)kernelsp[cpu]) - 1;
}
kstack->cp0_epc = (long)tcrestart;
/* Save TCStatus */
kstack->cp0_tcstatus = tcstatus;
	/* Pass token of operation to be performed in kernel stack pad area */
kstack->pad0[4] = (unsigned long)pipi;
/* Pass address of function to be called likewise */
kstack->pad0[5] = (unsigned long)&ipi_decode;
/* Set interrupt exempt and kernel mode */
tcstatus |= TCSTATUS_IXMT;
tcstatus &= ~TCSTATUS_TKSU;
write_tc_c0_tcstatus(tcstatus);
ehb();
/* Set TC Restart address to be SMTC IPI vector */
write_tc_c0_tcrestart(__smtc_ipi_vector);
}
static void ipi_resched_interrupt(void)
{
scheduler_ipi();
}
static void ipi_call_interrupt(void)
{
/* Invoke generic function invocation code in smp.c */
smp_call_function_interrupt();
}
DECLARE_PER_CPU(struct clock_event_device, mips_clockevent_device);
static void __irq_entry smtc_clock_tick_interrupt(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
int irq = MIPS_CPU_IRQ_BASE + 1;
irq_enter();
kstat_incr_irq_this_cpu(irq);
cd = &per_cpu(mips_clockevent_device, cpu);
cd->event_handler(cd);
irq_exit();
}
void ipi_decode(struct smtc_ipi *pipi)
{
void *arg_copy = pipi->arg;
int type_copy = pipi->type;
smtc_ipi_nq(&freeIPIq, pipi);
switch (type_copy) {
case SMTC_CLOCK_TICK:
smtc_clock_tick_interrupt();
break;
case LINUX_SMP_IPI:
switch ((int)arg_copy) {
case SMP_RESCHEDULE_YOURSELF:
ipi_resched_interrupt();
break;
case SMP_CALL_FUNCTION:
ipi_call_interrupt();
break;
default:
printk("Impossible SMTC IPI Argument %p\n", arg_copy);
break;
}
break;
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
case IRQ_AFFINITY_IPI:
/*
* Accept a "forwarded" interrupt that was initially
* taken by a TC who doesn't have affinity for the IRQ.
*/
do_IRQ_no_affinity((int)arg_copy);
break;
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
default:
printk("Impossible SMTC IPI Type 0x%x\n", type_copy);
break;
}
}
/*
* Similar to smtc_ipi_replay(), but invoked from context restore,
* so it reuses the current exception frame rather than set up a
* new one with self_ipi.
*/
void deferred_smtc_ipi(void)
{
int cpu = smp_processor_id();
/*
* Test is not atomic, but much faster than a dequeue,
* and the vast majority of invocations will have a null queue.
	 * If IRQs were disabled when this was called, then any IPIs queued
	 * after our last test will be taken on the next irq_enable/restore.
* If interrupts were enabled, then any IPIs added after the
* last test will be taken directly.
*/
while (IPIQ[cpu].head != NULL) {
struct smtc_ipi_q *q = &IPIQ[cpu];
struct smtc_ipi *pipi;
unsigned long flags;
/*
* It may be possible we'll come in with interrupts
* already enabled.
*/
local_irq_save(flags);
spin_lock(&q->lock);
pipi = __smtc_ipi_dq(q);
spin_unlock(&q->lock);
if (pipi != NULL) {
if (pipi->type == LINUX_SMP_IPI &&
(int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
IPIQ[cpu].resched_flag = 0;
ipi_decode(pipi);
}
/*
* The use of the __raw_local restore isn't
* as obviously necessary here as in smtc_ipi_replay(),
* but it's more efficient, given that we're already
* running down the IPI queue.
*/
__arch_local_irq_restore(flags);
}
}
/*
* Cross-VPE interrupts in the SMTC prototype use "software interrupts"
* set via cross-VPE MTTR manipulation of the Cause register. It would be
* in some regards preferable to have external logic for "doorbell" hardware
* interrupts.
*/
static int cpu_ipi_irq = MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_IRQ;
static irqreturn_t ipi_interrupt(int irq, void *dev_idm)
{
int my_vpe = cpu_data[smp_processor_id()].vpe_id;
int my_tc = cpu_data[smp_processor_id()].tc_id;
int cpu;
struct smtc_ipi *pipi;
unsigned long tcstatus;
int sent;
unsigned long flags;
unsigned int mtflags;
unsigned int vpflags;
/*
* So long as cross-VPE interrupts are done via
* MFTR/MTTR read-modify-writes of Cause, we need
* to stop other VPEs whenever the local VPE does
* anything similar.
*/
local_irq_save(flags);
vpflags = dvpe();
clear_c0_cause(0x100 << MIPS_CPU_IPI_IRQ);
set_c0_status(0x100 << MIPS_CPU_IPI_IRQ);
irq_enable_hazard();
evpe(vpflags);
local_irq_restore(flags);
/*
* Cross-VPE Interrupt handler: Try to directly deliver IPIs
* queued for TCs on this VPE other than the current one.
* Return-from-interrupt should cause us to drain the queue
* for the current TC, so we ought not to have to do it explicitly here.
*/
for_each_online_cpu(cpu) {
if (cpu_data[cpu].vpe_id != my_vpe)
continue;
pipi = smtc_ipi_dq(&IPIQ[cpu]);
if (pipi != NULL) {
if (cpu_data[cpu].tc_id != my_tc) {
sent = 0;
LOCK_MT_PRA();
settc(cpu_data[cpu].tc_id);
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
tcstatus = read_tc_c0_tcstatus();
if ((tcstatus & TCSTATUS_IXMT) == 0) {
post_direct_ipi(cpu, pipi);
sent = 1;
}
write_tc_c0_tchalt(0);
UNLOCK_MT_PRA();
if (!sent) {
smtc_ipi_req(&IPIQ[cpu], pipi);
}
} else {
/*
* ipi_decode() should be called
* with interrupts off
*/
local_irq_save(flags);
if (pipi->type == LINUX_SMP_IPI &&
(int)pipi->arg == SMP_RESCHEDULE_YOURSELF)
IPIQ[cpu].resched_flag = 0;
ipi_decode(pipi);
local_irq_restore(flags);
}
}
}
return IRQ_HANDLED;
}
static void ipi_irq_dispatch(void)
{
do_IRQ(cpu_ipi_irq);
}
static struct irqaction irq_ipi = {
.handler = ipi_interrupt,
.flags = IRQF_PERCPU,
.name = "SMTC_IPI"
};
static void setup_cross_vpe_interrupts(unsigned int nvpe)
{
if (nvpe < 1)
return;
if (!cpu_has_vint)
panic("SMTC Kernel requires Vectored Interrupt support");
set_vi_handler(MIPS_CPU_IPI_IRQ, ipi_irq_dispatch);
setup_irq_smtc(cpu_ipi_irq, &irq_ipi, (0x100 << MIPS_CPU_IPI_IRQ));
irq_set_handler(cpu_ipi_irq, handle_percpu_irq);
}
/*
* SMTC-specific hacks invoked from elsewhere in the kernel.
*/
/*
* smtc_ipi_replay is called from raw_local_irq_restore
*/
void smtc_ipi_replay(void)
{
unsigned int cpu = smp_processor_id();
/*
* To the extent that we've ever turned interrupts off,
* we may have accumulated deferred IPIs. This is subtle.
* We should be OK: if we pick up something and dispatch
* it here, that's great. If we see nothing, but concurrent
* with this operation, another TC sends us an IPI, IXMT
* is clear, and we'll handle it as a real pseudo-interrupt
* and not a pseudo-pseudo interrupt. The important thing
* is to do the last check for queued message *after* the
* re-enabling of interrupts.
*/
while (IPIQ[cpu].head != NULL) {
struct smtc_ipi_q *q = &IPIQ[cpu];
struct smtc_ipi *pipi;
unsigned long flags;
/*
* It's just possible we'll come in with interrupts
* already enabled.
*/
local_irq_save(flags);
spin_lock(&q->lock);
pipi = __smtc_ipi_dq(q);
spin_unlock(&q->lock);
/*
* But use a raw restore here to avoid recursion.
*/
__arch_local_irq_restore(flags);
if (pipi) {
self_ipi(pipi);
smtc_cpu_stats[cpu].selfipis++;
}
}
}
EXPORT_SYMBOL(smtc_ipi_replay);
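/*
 * For illustration: the shape of the hook through which the replay above
 * was reached.  The SMTC branch removed from the out-of-line irqflags
 * helpers later in this diff tested the saved TCStatus.IXMT bit (0x400)
 * and, when interrupts were about to come back on, drained deferred IPIs
 * before doing a raw restore.  The function name below is hypothetical;
 * this is a sketch, not the removed implementation.
 */
static void sketch_irq_restore_with_replay(unsigned long flags)
{
        if (!(flags & 0x0400))          /* IXMT clear: interrupts being re-enabled */
                smtc_ipi_replay();      /* drain IPIs deferred while they were off */

        __arch_local_irq_restore(flags); /* raw restore, so we don't recurse */
}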
void smtc_idle_loop_hook(void)
{
#ifdef CONFIG_SMTC_IDLE_HOOK_DEBUG
int im;
int flags;
int mtflags;
int bit;
int vpe;
int tc;
int hook_ntcs;
/*
* printk within DMT-protected regions can deadlock,
* so buffer diagnostic messages for later output.
*/
char *pdb_msg;
char id_ho_db_msg[768]; /* worst-case use should be less than 700 */
if (atomic_read(&idle_hook_initialized) == 0) { /* fast test */
if (atomic_add_return(1, &idle_hook_initialized) == 1) {
int mvpconf0;
/* Tedious stuff to just do once */
mvpconf0 = read_c0_mvpconf0();
hook_ntcs = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
if (hook_ntcs > NR_CPUS)
hook_ntcs = NR_CPUS;
for (tc = 0; tc < hook_ntcs; tc++) {
tcnoprog[tc] = 0;
clock_hang_reported[tc] = 0;
}
for (vpe = 0; vpe < 2; vpe++)
for (im = 0; im < 8; im++)
imstuckcount[vpe][im] = 0;
printk("Idle loop test hook initialized for %d TCs\n", hook_ntcs);
atomic_set(&idle_hook_initialized, 1000);
} else {
/* Someone else is initializing in parallel - let 'em finish */
while (atomic_read(&idle_hook_initialized) < 1000)
;
}
}
/* Have we stupidly left IXMT set somewhere? */
if (read_c0_tcstatus() & 0x400) {
write_c0_tcstatus(read_c0_tcstatus() & ~0x400);
ehb();
printk("Dangling IXMT in cpu_idle()\n");
}
/* Have we stupidly left an IM bit turned off? */
#define IM_LIMIT 2000
local_irq_save(flags);
mtflags = dmt();
pdb_msg = &id_ho_db_msg[0];
im = read_c0_status();
vpe = current_cpu_data.vpe_id;
for (bit = 0; bit < 8; bit++) {
/*
* In the current prototype, I/O interrupts
* are masked for VPE > 0
*/
if (vpemask[vpe][bit]) {
if (!(im & (0x100 << bit)))
imstuckcount[vpe][bit]++;
else
imstuckcount[vpe][bit] = 0;
if (imstuckcount[vpe][bit] > IM_LIMIT) {
set_c0_status(0x100 << bit);
ehb();
imstuckcount[vpe][bit] = 0;
pdb_msg += sprintf(pdb_msg,
"Dangling IM %d fixed for VPE %d\n", bit,
vpe);
}
}
}
emt(mtflags);
local_irq_restore(flags);
if (pdb_msg != &id_ho_db_msg[0])
printk("CPU%d: %s", smp_processor_id(), id_ho_db_msg);
#endif /* CONFIG_SMTC_IDLE_HOOK_DEBUG */
smtc_ipi_replay();
}
void smtc_soft_dump(void)
{
int i;
printk("Counter Interrupts taken per CPU (TC)\n");
for (i=0; i < NR_CPUS; i++) {
printk("%d: %ld\n", i, smtc_cpu_stats[i].timerints);
}
printk("Self-IPI invocations:\n");
for (i=0; i < NR_CPUS; i++) {
printk("%d: %ld\n", i, smtc_cpu_stats[i].selfipis);
}
smtc_ipi_qdump();
printk("%d Recoveries of \"stolen\" FPU\n",
atomic_read(&smtc_fpu_recoveries));
}
/*
* TLB management routines special to SMTC
*/
void smtc_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
unsigned long flags, mtflags, tcstat, prevhalt, asid;
int tlb, i;
/*
* It would be nice to be able to use a spinlock here,
* but this is invoked from within TLB flush routines
* that protect themselves with DVPE, so if a lock is
* held by another TC, it'll never be freed.
*
* DVPE/DMT must not be done with interrupts enabled,
* so even though most callers will already have disabled
* them, let's be really careful...
*/
local_irq_save(flags);
if (smtc_status & SMTC_TLB_SHARED) {
mtflags = dvpe();
tlb = 0;
} else {
mtflags = dmt();
tlb = cpu_data[cpu].vpe_id;
}
asid = asid_cache(cpu);
do {
if (!((asid += ASID_INC) & ASID_MASK)) {
if (cpu_has_vtag_icache)
flush_icache_all();
/* Traverse all online CPUs (hack requires contiguous range) */
for_each_online_cpu(i) {
/*
* We don't need to worry about our own CPU, nor those of
* CPUs who don't share our TLB.
*/
if ((i != smp_processor_id()) &&
((smtc_status & SMTC_TLB_SHARED) ||
(cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))) {
settc(cpu_data[i].tc_id);
prevhalt = read_tc_c0_tchalt() & TCHALT_H;
if (!prevhalt) {
write_tc_c0_tchalt(TCHALT_H);
mips_ihb();
}
tcstat = read_tc_c0_tcstatus();
smtc_live_asid[tlb][(tcstat & ASID_MASK)] |= (asiduse)(0x1 << i);
if (!prevhalt)
write_tc_c0_tchalt(0);
}
}
if (!asid) /* fix version if needed */
asid = ASID_FIRST_VERSION;
local_flush_tlb_all(); /* start new asid cycle */
}
} while (smtc_live_asid[tlb][(asid & ASID_MASK)]);
/*
* SMTC shares the TLB within VPEs and possibly across all VPEs.
*/
for_each_online_cpu(i) {
if ((smtc_status & SMTC_TLB_SHARED) ||
(cpu_data[i].vpe_id == cpu_data[cpu].vpe_id))
cpu_context(i, mm) = asid_cache(i) = asid;
}
if (smtc_status & SMTC_TLB_SHARED)
evpe(mtflags);
else
emt(mtflags);
local_irq_restore(flags);
}
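/*
 * For comparison, a minimal single-CPU sketch of the standard MIPS ASID
 * scheme that the SMTC version above extends: the low bits of the ASID
 * cache are the hardware ASID, the high bits a generation counter, and a
 * wrap of the low bits forces an icache/TLB flush and a new generation.
 * Hypothetical function name; the non-SMTC equivalent lives in
 * asm/mmu_context.h.
 */
static void sketch_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
{
        unsigned long asid = asid_cache(cpu);

        if (!((asid += ASID_INC) & ASID_MASK)) {        /* hardware ASID wrapped */
                if (cpu_has_vtag_icache)
                        flush_icache_all();
                if (!asid)                      /* generation counter wrapped too */
                        asid = ASID_FIRST_VERSION;
                local_flush_tlb_all();          /* start a new ASID cycle */
        }
        cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}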
/*
* Invoked from macros defined in mmu_context.h
* which must already have disabled interrupts
* and done a DVPE or DMT as appropriate.
*/
void smtc_flush_tlb_asid(unsigned long asid)
{
int entry;
unsigned long ehi;
entry = read_c0_wired();
/* Traverse all non-wired entries */
while (entry < current_cpu_data.tlbsize) {
write_c0_index(entry);
ehb();
tlb_read();
ehb();
ehi = read_c0_entryhi();
if ((ehi & ASID_MASK) == asid) {
/*
* Invalidate only entries with specified ASID,
* making sure all entries differ.
*/
write_c0_entryhi(CKSEG0 + (entry << (PAGE_SHIFT + 1)));
write_c0_entrylo0(0);
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
}
entry++;
}
write_c0_index(PARKED_INDEX);
tlbw_use_hazard();
}
/*
* Support for single-threading cache flush operations.
*/
static int halt_state_save[NR_CPUS];
/*
* To really, really be sure that nothing is being done
* by other TCs, halt them all. This code assumes that
* a DVPE has already been done, so while their Halted
* state is theoretically architecturally unstable, in
* practice, it's not going to change while we're looking
* at it.
*/
void smtc_cflush_lockdown(void)
{
int cpu;
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id()) {
settc(cpu_data[cpu].tc_id);
halt_state_save[cpu] = read_tc_c0_tchalt();
write_tc_c0_tchalt(TCHALT_H);
}
}
mips_ihb();
}
/* It would be cheating to change the cpu_online states during a flush! */
void smtc_cflush_release(void)
{
int cpu;
/*
* Start with a hazard barrier to ensure
* that all CACHE ops have played through.
*/
mips_ihb();
for_each_online_cpu(cpu) {
if (cpu != smp_processor_id()) {
settc(cpu_data[cpu].tc_id);
write_tc_c0_tchalt(halt_state_save[cpu]);
}
}
mips_ihb();
}
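/*
 * For illustration, the call pattern the lockdown/release pair above is
 * designed for (hypothetical wrapper -- the real callers are in the SMTC
 * cache flush paths): interrupts off, DVPE first, then halt the other TCs
 * around the index-based cache operation.
 */
static void sketch_single_threaded_cache_op(void (*cache_op)(void))
{
        unsigned long flags;
        unsigned int vpflags;

        local_irq_save(flags);
        vpflags = dvpe();               /* stop the other VPE(s)... */
        smtc_cflush_lockdown();         /* ...then halt every other TC */

        cache_op();                     /* the actual CACHE instruction loop */

        smtc_cflush_release();          /* restore each TC's saved TCHalt state */
        evpe(vpflags);
        local_irq_restore(flags);
}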
...@@ -6,8 +6,6 @@ ...@@ -6,8 +6,6 @@
* not have done anything significant (but they may have had interrupts * not have done anything significant (but they may have had interrupts
* enabled briefly - prom_smp_finish() should not be responsible for enabling * enabled briefly - prom_smp_finish() should not be responsible for enabling
* interrupts...) * interrupts...)
*
* FIXME: broken for SMTC
*/ */
#include <linux/kernel.h> #include <linux/kernel.h>
...@@ -33,14 +31,6 @@ void synchronise_count_master(int cpu) ...@@ -33,14 +31,6 @@ void synchronise_count_master(int cpu)
unsigned long flags; unsigned long flags;
unsigned int initcount; unsigned int initcount;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC needs to synchronise per VPE, not per CPU
* ignore for now
*/
return;
#endif
printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu); printk(KERN_INFO "Synchronize counters for CPU %u: ", cpu);
local_irq_save(flags); local_irq_save(flags);
...@@ -110,14 +100,6 @@ void synchronise_count_slave(int cpu) ...@@ -110,14 +100,6 @@ void synchronise_count_slave(int cpu)
int i; int i;
unsigned int initcount; unsigned int initcount;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC needs to synchronise per VPE, not per CPU
* ignore for now
*/
return;
#endif
/* /*
* Not every cpu is online at the time this gets called, * Not every cpu is online at the time this gets called,
* so we first wait for the master to say everyone is ready * so we first wait for the master to say everyone is ready
......
...@@ -26,7 +26,6 @@ ...@@ -26,7 +26,6 @@
#include <asm/cpu-features.h> #include <asm/cpu-features.h>
#include <asm/cpu-type.h> #include <asm/cpu-type.h>
#include <asm/div64.h> #include <asm/div64.h>
#include <asm/smtc_ipi.h>
#include <asm/time.h> #include <asm/time.h>
/* /*
......
...@@ -370,9 +370,6 @@ void __noreturn die(const char *str, struct pt_regs *regs) ...@@ -370,9 +370,6 @@ void __noreturn die(const char *str, struct pt_regs *regs)
{ {
static int die_counter; static int die_counter;
int sig = SIGSEGV; int sig = SIGSEGV;
#ifdef CONFIG_MIPS_MT_SMTC
unsigned long dvpret;
#endif /* CONFIG_MIPS_MT_SMTC */
oops_enter(); oops_enter();
...@@ -382,13 +379,7 @@ void __noreturn die(const char *str, struct pt_regs *regs) ...@@ -382,13 +379,7 @@ void __noreturn die(const char *str, struct pt_regs *regs)
console_verbose(); console_verbose();
raw_spin_lock_irq(&die_lock); raw_spin_lock_irq(&die_lock);
#ifdef CONFIG_MIPS_MT_SMTC
dvpret = dvpe();
#endif /* CONFIG_MIPS_MT_SMTC */
bust_spinlocks(1); bust_spinlocks(1);
#ifdef CONFIG_MIPS_MT_SMTC
mips_mt_regdump(dvpret);
#endif /* CONFIG_MIPS_MT_SMTC */
printk("%s[#%d]:\n", str, ++die_counter); printk("%s[#%d]:\n", str, ++die_counter);
show_registers(regs); show_registers(regs);
...@@ -1759,19 +1750,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) ...@@ -1759,19 +1750,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
extern char rollback_except_vec_vi; extern char rollback_except_vec_vi;
char *vec_start = using_rollback_handler() ? char *vec_start = using_rollback_handler() ?
&rollback_except_vec_vi : &except_vec_vi; &rollback_except_vec_vi : &except_vec_vi;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* We need to provide the SMTC vectored interrupt handler
* not only with the address of the handler, but with the
* Status.IM bit to be masked before going there.
*/
extern char except_vec_vi_mori;
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int mori_offset = &except_vec_vi_mori - vec_start + 2;
#else
const int mori_offset = &except_vec_vi_mori - vec_start;
#endif
#endif /* CONFIG_MIPS_MT_SMTC */
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN) #if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
const int lui_offset = &except_vec_vi_lui - vec_start + 2; const int lui_offset = &except_vec_vi_lui - vec_start + 2;
const int ori_offset = &except_vec_vi_ori - vec_start + 2; const int ori_offset = &except_vec_vi_ori - vec_start + 2;
...@@ -1795,12 +1773,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs) ...@@ -1795,12 +1773,6 @@ static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
#else #else
handler_len); handler_len);
#endif #endif
#ifdef CONFIG_MIPS_MT_SMTC
BUG_ON(n > 7); /* Vector index %d exceeds SMTC maximum. */
h = (u16 *)(b + mori_offset);
*h = (0x100 << n);
#endif /* CONFIG_MIPS_MT_SMTC */
h = (u16 *)(b + lui_offset); h = (u16 *)(b + lui_offset);
*h = (handler >> 16) & 0xffff; *h = (handler >> 16) & 0xffff;
h = (u16 *)(b + ori_offset); h = (u16 *)(b + ori_offset);
...@@ -1870,20 +1842,6 @@ void per_cpu_trap_init(bool is_boot_cpu) ...@@ -1870,20 +1842,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
unsigned int cpu = smp_processor_id(); unsigned int cpu = smp_processor_id();
unsigned int status_set = ST0_CU0; unsigned int status_set = ST0_CU0;
unsigned int hwrena = cpu_hwrena_impl_bits; unsigned int hwrena = cpu_hwrena_impl_bits;
#ifdef CONFIG_MIPS_MT_SMTC
int secondaryTC = 0;
int bootTC = (cpu == 0);
/*
* Only do per_cpu_trap_init() for the first TC of each VPE.
* Note that this hack assumes that the SMTC init code
* assigns TCs consecutively and in ascending order.
*/
if (((read_c0_tcbind() & TCBIND_CURTC) != 0) &&
((read_c0_tcbind() & TCBIND_CURVPE) == cpu_data[cpu - 1].vpe_id))
secondaryTC = 1;
#endif /* CONFIG_MIPS_MT_SMTC */
/* /*
* Disable coprocessors and select 32-bit or 64-bit addressing * Disable coprocessors and select 32-bit or 64-bit addressing
...@@ -1911,10 +1869,6 @@ void per_cpu_trap_init(bool is_boot_cpu) ...@@ -1911,10 +1869,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
if (hwrena) if (hwrena)
write_c0_hwrena(hwrena); write_c0_hwrena(hwrena);
#ifdef CONFIG_MIPS_MT_SMTC
if (!secondaryTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
if (cpu_has_veic || cpu_has_vint) { if (cpu_has_veic || cpu_has_vint) {
unsigned long sr = set_c0_status(ST0_BEV); unsigned long sr = set_c0_status(ST0_BEV);
write_c0_ebase(ebase); write_c0_ebase(ebase);
...@@ -1949,10 +1903,6 @@ void per_cpu_trap_init(bool is_boot_cpu) ...@@ -1949,10 +1903,6 @@ void per_cpu_trap_init(bool is_boot_cpu)
cp0_perfcount_irq = -1; cp0_perfcount_irq = -1;
} }
#ifdef CONFIG_MIPS_MT_SMTC
}
#endif /* CONFIG_MIPS_MT_SMTC */
if (!cpu_data[cpu].asid_cache) if (!cpu_data[cpu].asid_cache)
cpu_data[cpu].asid_cache = ASID_FIRST_VERSION; cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
...@@ -1961,23 +1911,10 @@ void per_cpu_trap_init(bool is_boot_cpu) ...@@ -1961,23 +1911,10 @@ void per_cpu_trap_init(bool is_boot_cpu)
BUG_ON(current->mm); BUG_ON(current->mm);
enter_lazy_tlb(&init_mm, current); enter_lazy_tlb(&init_mm, current);
#ifdef CONFIG_MIPS_MT_SMTC
if (bootTC) {
#endif /* CONFIG_MIPS_MT_SMTC */
/* Boot CPU's cache setup in setup_arch(). */ /* Boot CPU's cache setup in setup_arch(). */
if (!is_boot_cpu) if (!is_boot_cpu)
cpu_cache_init(); cpu_cache_init();
tlb_init(); tlb_init();
#ifdef CONFIG_MIPS_MT_SMTC
} else if (!secondaryTC) {
/*
* First TC in non-boot VPE must do subset of tlb_init()
* for MMU control registers.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
write_c0_wired(0);
}
#endif /* CONFIG_MIPS_MT_SMTC */
TLBMISS_HANDLER_SETUP(); TLBMISS_HANDLER_SETUP();
} }
......
...@@ -127,9 +127,8 @@ int vpe_run(struct vpe *v) ...@@ -127,9 +127,8 @@ int vpe_run(struct vpe *v)
clear_c0_mvpcontrol(MVPCONTROL_VPC); clear_c0_mvpcontrol(MVPCONTROL_VPC);
/* /*
* SMTC/SMVP kernels manage VPE enable independently, * SMVP kernels manage VPE enable independently, but uniprocessor
* but uniprocessor kernels need to turn it on, even * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
* if that wasn't the pre-dvpe() state.
*/ */
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
evpe(vpeflags); evpe(vpeflags);
...@@ -454,12 +453,11 @@ int __init vpe_module_init(void) ...@@ -454,12 +453,11 @@ int __init vpe_module_init(void)
settc(tc); settc(tc);
/* Any TC that is bound to VPE0 gets left as is - in /*
* case we are running SMTC on VPE0. A TC that is bound * A TC that is bound to any other VPE gets bound to
* to any other VPE gets bound to VPE0, ideally I'd like * VPE0, ideally I'd like to make it homeless but it
* to make it homeless but it doesn't appear to let me * doesn't appear to let me bind a TC to a non-existent
* bind a TC to a non-existent VPE. Which is perfectly * VPE. Which is perfectly reasonable.
* reasonable.
* *
* The (un)bound state is visible to an EJTAG probe so * The (un)bound state is visible to an EJTAG probe so
* may notify GDB... * may notify GDB...
......
...@@ -61,7 +61,7 @@ ...@@ -61,7 +61,7 @@
/* we have a cascade of 8 irqs */ /* we have a cascade of 8 irqs */
#define MIPS_CPU_IRQ_CASCADE 8 #define MIPS_CPU_IRQ_CASCADE 8
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) #ifdef CONFIG_MIPS_MT_SMP
int gic_present; int gic_present;
#endif #endif
...@@ -440,7 +440,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent) ...@@ -440,7 +440,7 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call); arch_init_ipiirq(MIPS_CPU_IRQ_BASE + MIPS_CPU_IPI_CALL_IRQ, &irq_call);
#endif #endif
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) #ifndef CONFIG_MIPS_MT_SMP
set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 | set_c0_status(IE_IRQ0 | IE_IRQ1 | IE_IRQ2 |
IE_IRQ3 | IE_IRQ4 | IE_IRQ5); IE_IRQ3 | IE_IRQ4 | IE_IRQ5);
#else #else
......
...@@ -15,7 +15,7 @@ ...@@ -15,7 +15,7 @@
#include <linux/export.h> #include <linux/export.h>
#include <linux/stringify.h> #include <linux/stringify.h>
#if !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) #ifndef CONFIG_CPU_MIPSR2
/* /*
* For cli() we have to insert nops to make sure that the new value * For cli() we have to insert nops to make sure that the new value
...@@ -42,12 +42,7 @@ notrace void arch_local_irq_disable(void) ...@@ -42,12 +42,7 @@ notrace void arch_local_irq_disable(void)
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n" " .set push \n"
" .set noat \n" " .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC #if defined(CONFIG_CPU_MIPSR2)
" mfc0 $1, $2, 1 \n"
" ori $1, 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
#else #else
" mfc0 $1,$12 \n" " mfc0 $1,$12 \n"
...@@ -77,13 +72,7 @@ notrace unsigned long arch_local_irq_save(void) ...@@ -77,13 +72,7 @@ notrace unsigned long arch_local_irq_save(void)
" .set push \n" " .set push \n"
" .set reorder \n" " .set reorder \n"
" .set noat \n" " .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC #if defined(CONFIG_CPU_MIPSR2)
" mfc0 %[flags], $2, 1 \n"
" ori $1, %[flags], 0x400 \n"
" .set noreorder \n"
" mtc0 $1, $2, 1 \n"
" andi %[flags], %[flags], 0x400 \n"
#elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
#else #else
" mfc0 %[flags], $12 \n" " mfc0 %[flags], $12 \n"
...@@ -108,29 +97,13 @@ notrace void arch_local_irq_restore(unsigned long flags) ...@@ -108,29 +97,13 @@ notrace void arch_local_irq_restore(unsigned long flags)
{ {
unsigned long __tmp1; unsigned long __tmp1;
#ifdef CONFIG_MIPS_MT_SMTC
/*
* SMTC kernel needs to do a software replay of queued
* IPIs, at the cost of branch and call overhead on each
* local_irq_restore()
*/
if (unlikely(!(flags & 0x0400)))
smtc_ipi_replay();
#endif
preempt_disable(); preempt_disable();
__asm__ __volatile__( __asm__ __volatile__(
" .set push \n" " .set push \n"
" .set noreorder \n" " .set noreorder \n"
" .set noat \n" " .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
" mfc0 $1, $2, 1 \n"
" andi %[flags], 0x400 \n"
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2) #elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
...@@ -163,14 +136,7 @@ notrace void __arch_local_irq_restore(unsigned long flags) ...@@ -163,14 +136,7 @@ notrace void __arch_local_irq_restore(unsigned long flags)
" .set push \n" " .set push \n"
" .set noreorder \n" " .set noreorder \n"
" .set noat \n" " .set noat \n"
#ifdef CONFIG_MIPS_MT_SMTC #if defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
" mfc0 $1, $2, 1 \n"
" andi %[flags], 0x400 \n"
" ori $1, 0x400 \n"
" xori $1, 0x400 \n"
" or %[flags], $1 \n"
" mtc0 %[flags], $2, 1 \n"
#elif defined(CONFIG_CPU_MIPSR2) && defined(CONFIG_IRQ_CPU)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
#elif defined(CONFIG_CPU_MIPSR2) #elif defined(CONFIG_CPU_MIPSR2)
/* see irqflags.h for inline function */ /* see irqflags.h for inline function */
...@@ -192,4 +158,4 @@ notrace void __arch_local_irq_restore(unsigned long flags) ...@@ -192,4 +158,4 @@ notrace void __arch_local_irq_restore(unsigned long flags)
} }
EXPORT_SYMBOL(__arch_local_irq_restore); EXPORT_SYMBOL(__arch_local_irq_restore);
#endif /* !defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_MIPS_MT_SMTC) */ #endif /* !CONFIG_CPU_MIPSR2 */
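/*
 * For illustration: what the SMTC branches removed from the out-of-line
 * irqflags helpers above were doing at the C level.  SMTC masked
 * interrupts per TC via TCStatus.IXMT (CP0 register 2, select 1, bit
 * 0x400) instead of Status.IE.  The names below are hypothetical sketches
 * built on the read/write accessors that appear elsewhere in this patch.
 */
#define SKETCH_TCSTATUS_IXMT    0x400

static unsigned long sketch_smtc_irq_save(void)
{
        unsigned long tcs = read_c0_tcstatus();

        write_c0_tcstatus(tcs | SKETCH_TCSTATUS_IXMT);  /* inhibit this TC only */
        ehb();                                          /* let the CP0 write settle */
        return tcs & SKETCH_TCSTATUS_IXMT;              /* old IXMT is the "flags" */
}

static void sketch_smtc_irq_restore(unsigned long flags)
{
        unsigned long tcs = read_c0_tcstatus() & ~SKETCH_TCSTATUS_IXMT;

        write_c0_tcstatus(tcs | (flags & SKETCH_TCSTATUS_IXMT));
        ehb();
}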
...@@ -50,7 +50,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) ...@@ -50,7 +50,7 @@ static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
{ {
preempt_disable(); preempt_disable();
#if !defined(CONFIG_MIPS_MT_SMP) && !defined(CONFIG_MIPS_MT_SMTC) #ifndef CONFIG_MIPS_MT_SMP
smp_call_function(func, info, 1); smp_call_function(func, info, 1);
#endif #endif
func(info); func(info);
...@@ -427,7 +427,7 @@ static void r4k___flush_cache_all(void) ...@@ -427,7 +427,7 @@ static void r4k___flush_cache_all(void)
static inline int has_valid_asid(const struct mm_struct *mm) static inline int has_valid_asid(const struct mm_struct *mm)
{ {
#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_MIPS_MT_SMTC) #ifdef CONFIG_MIPS_MT_SMP
int i; int i;
for_each_online_cpu(i) for_each_online_cpu(i)
......
...@@ -44,27 +44,6 @@ ...@@ -44,27 +44,6 @@
#include <asm/tlb.h> #include <asm/tlb.h>
#include <asm/fixmap.h> #include <asm/fixmap.h>
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/mipsmtregs.h>
#define ENTER_CRITICAL(flags) \
{ \
unsigned int mvpflags; \
local_irq_save(flags);\
mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
evpe(mvpflags); \
local_irq_restore(flags); \
}
#else
#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)
#endif /* CONFIG_MIPS_MT_SMTC */
/* /*
* We have up to 8 empty zeroed pages so we can map one of the right colour * We have up to 8 empty zeroed pages so we can map one of the right colour
* when needed. This is necessary only on R4000 / R4400 SC and MC versions * when needed. This is necessary only on R4000 / R4400 SC and MC versions
...@@ -100,20 +79,6 @@ void setup_zero_pages(void) ...@@ -100,20 +79,6 @@ void setup_zero_pages(void)
zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK; zero_page_mask = ((PAGE_SIZE << order) - 1) & PAGE_MASK;
} }
#ifdef CONFIG_MIPS_MT_SMTC
static pte_t *kmap_coherent_pte;
static void __init kmap_coherent_init(void)
{
unsigned long vaddr;
/* cache the first coherent kmap pte */
vaddr = __fix_to_virt(FIX_CMAP_BEGIN);
kmap_coherent_pte = kmap_get_fixmap_pte(vaddr);
}
#else
static inline void kmap_coherent_init(void) {}
#endif
void *kmap_coherent(struct page *page, unsigned long addr) void *kmap_coherent(struct page *page, unsigned long addr)
{ {
enum fixed_addresses idx; enum fixed_addresses idx;
...@@ -126,12 +91,7 @@ void *kmap_coherent(struct page *page, unsigned long addr) ...@@ -126,12 +91,7 @@ void *kmap_coherent(struct page *page, unsigned long addr)
pagefault_disable(); pagefault_disable();
idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1); idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
#ifdef CONFIG_MIPS_MT_SMTC
idx += FIX_N_COLOURS * smp_processor_id() +
(in_interrupt() ? (FIX_N_COLOURS * NR_CPUS) : 0);
#else
idx += in_interrupt() ? FIX_N_COLOURS : 0; idx += in_interrupt() ? FIX_N_COLOURS : 0;
#endif
vaddr = __fix_to_virt(FIX_CMAP_END - idx); vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, PAGE_KERNEL); pte = mk_pte(page, PAGE_KERNEL);
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32) #if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
...@@ -140,44 +100,29 @@ void *kmap_coherent(struct page *page, unsigned long addr) ...@@ -140,44 +100,29 @@ void *kmap_coherent(struct page *page, unsigned long addr)
entrylo = pte_to_entrylo(pte_val(pte)); entrylo = pte_to_entrylo(pte_val(pte));
#endif #endif
ENTER_CRITICAL(flags); local_irq_save(flags);
old_ctx = read_c0_entryhi(); old_ctx = read_c0_entryhi();
write_c0_entryhi(vaddr & (PAGE_MASK << 1)); write_c0_entryhi(vaddr & (PAGE_MASK << 1));
write_c0_entrylo0(entrylo); write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo); write_c0_entrylo1(entrylo);
#ifdef CONFIG_MIPS_MT_SMTC
set_pte(kmap_coherent_pte - (FIX_CMAP_END - idx), pte);
/* preload TLB instead of local_flush_tlb_one() */
mtc0_tlbw_hazard();
tlb_probe();
tlb_probe_hazard();
tlbidx = read_c0_index();
mtc0_tlbw_hazard();
if (tlbidx < 0)
tlb_write_random();
else
tlb_write_indexed();
#else
tlbidx = read_c0_wired(); tlbidx = read_c0_wired();
write_c0_wired(tlbidx + 1); write_c0_wired(tlbidx + 1);
write_c0_index(tlbidx); write_c0_index(tlbidx);
mtc0_tlbw_hazard(); mtc0_tlbw_hazard();
tlb_write_indexed(); tlb_write_indexed();
#endif
tlbw_use_hazard(); tlbw_use_hazard();
write_c0_entryhi(old_ctx); write_c0_entryhi(old_ctx);
EXIT_CRITICAL(flags); local_irq_restore(flags);
return (void*) vaddr; return (void*) vaddr;
} }
void kunmap_coherent(void) void kunmap_coherent(void)
{ {
#ifndef CONFIG_MIPS_MT_SMTC
unsigned int wired; unsigned int wired;
unsigned long flags, old_ctx; unsigned long flags, old_ctx;
ENTER_CRITICAL(flags); local_irq_save(flags);
old_ctx = read_c0_entryhi(); old_ctx = read_c0_entryhi();
wired = read_c0_wired() - 1; wired = read_c0_wired() - 1;
write_c0_wired(wired); write_c0_wired(wired);
...@@ -189,8 +134,7 @@ void kunmap_coherent(void) ...@@ -189,8 +134,7 @@ void kunmap_coherent(void)
tlb_write_indexed(); tlb_write_indexed();
tlbw_use_hazard(); tlbw_use_hazard();
write_c0_entryhi(old_ctx); write_c0_entryhi(old_ctx);
EXIT_CRITICAL(flags); local_irq_restore(flags);
#endif
pagefault_enable(); pagefault_enable();
} }
...@@ -256,7 +200,7 @@ EXPORT_SYMBOL_GPL(copy_from_user_page); ...@@ -256,7 +200,7 @@ EXPORT_SYMBOL_GPL(copy_from_user_page);
void __init fixrange_init(unsigned long start, unsigned long end, void __init fixrange_init(unsigned long start, unsigned long end,
pgd_t *pgd_base) pgd_t *pgd_base)
{ {
#if defined(CONFIG_HIGHMEM) || defined(CONFIG_MIPS_MT_SMTC) #ifdef CONFIG_HIGHMEM
pgd_t *pgd; pgd_t *pgd;
pud_t *pud; pud_t *pud;
pmd_t *pmd; pmd_t *pmd;
...@@ -327,8 +271,6 @@ void __init paging_init(void) ...@@ -327,8 +271,6 @@ void __init paging_init(void)
#ifdef CONFIG_HIGHMEM #ifdef CONFIG_HIGHMEM
kmap_init(); kmap_init();
#endif #endif
kmap_coherent_init();
#ifdef CONFIG_ZONE_DMA #ifdef CONFIG_ZONE_DMA
max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN; max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
#endif #endif
......
...@@ -25,28 +25,6 @@ ...@@ -25,28 +25,6 @@
extern void build_tlb_refill_handler(void); extern void build_tlb_refill_handler(void);
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC
#include <asm/smtc.h>
#include <asm/mipsmtregs.h>
#define ENTER_CRITICAL(flags) \
{ \
unsigned int mvpflags; \
local_irq_save(flags);\
mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
evpe(mvpflags); \
local_irq_restore(flags); \
}
#else
#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)
#endif /* CONFIG_MIPS_MT_SMTC */
/* /*
* LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb, * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
* unfortunately, itlb is not totally transparent to software. * unfortunately, itlb is not totally transparent to software.
...@@ -75,7 +53,7 @@ void local_flush_tlb_all(void) ...@@ -75,7 +53,7 @@ void local_flush_tlb_all(void)
unsigned long old_ctx; unsigned long old_ctx;
int entry, ftlbhighset; int entry, ftlbhighset;
ENTER_CRITICAL(flags); local_irq_save(flags);
/* Save old context and create impossible VPN2 value */ /* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi(); old_ctx = read_c0_entryhi();
write_c0_entrylo0(0); write_c0_entrylo0(0);
...@@ -112,7 +90,7 @@ void local_flush_tlb_all(void) ...@@ -112,7 +90,7 @@ void local_flush_tlb_all(void)
tlbw_use_hazard(); tlbw_use_hazard();
write_c0_entryhi(old_ctx); write_c0_entryhi(old_ctx);
flush_itlb(); flush_itlb();
EXIT_CRITICAL(flags); local_irq_restore(flags);
} }
EXPORT_SYMBOL(local_flush_tlb_all); EXPORT_SYMBOL(local_flush_tlb_all);
...@@ -142,7 +120,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, ...@@ -142,7 +120,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
if (cpu_context(cpu, mm) != 0) { if (cpu_context(cpu, mm) != 0) {
unsigned long size, flags; unsigned long size, flags;
ENTER_CRITICAL(flags); local_irq_save(flags);
start = round_down(start, PAGE_SIZE << 1); start = round_down(start, PAGE_SIZE << 1);
end = round_up(end, PAGE_SIZE << 1); end = round_up(end, PAGE_SIZE << 1);
size = (end - start) >> (PAGE_SHIFT + 1); size = (end - start) >> (PAGE_SHIFT + 1);
...@@ -176,7 +154,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, ...@@ -176,7 +154,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
drop_mmu_context(mm, cpu); drop_mmu_context(mm, cpu);
} }
flush_itlb(); flush_itlb();
EXIT_CRITICAL(flags); local_irq_restore(flags);
} }
} }
...@@ -184,7 +162,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) ...@@ -184,7 +162,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{ {
unsigned long size, flags; unsigned long size, flags;
ENTER_CRITICAL(flags); local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT; size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
size = (size + 1) >> 1; size = (size + 1) >> 1;
if (size <= (current_cpu_data.tlbsizeftlbsets ? if (size <= (current_cpu_data.tlbsizeftlbsets ?
...@@ -220,7 +198,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end) ...@@ -220,7 +198,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
local_flush_tlb_all(); local_flush_tlb_all();
} }
flush_itlb(); flush_itlb();
EXIT_CRITICAL(flags); local_irq_restore(flags);
} }
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
...@@ -233,7 +211,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) ...@@ -233,7 +211,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
newpid = cpu_asid(cpu, vma->vm_mm); newpid = cpu_asid(cpu, vma->vm_mm);
page &= (PAGE_MASK << 1); page &= (PAGE_MASK << 1);
ENTER_CRITICAL(flags); local_irq_save(flags);
oldpid = read_c0_entryhi(); oldpid = read_c0_entryhi();
write_c0_entryhi(page | newpid); write_c0_entryhi(page | newpid);
mtc0_tlbw_hazard(); mtc0_tlbw_hazard();
...@@ -253,7 +231,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) ...@@ -253,7 +231,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
finish: finish:
write_c0_entryhi(oldpid); write_c0_entryhi(oldpid);
flush_itlb_vm(vma); flush_itlb_vm(vma);
EXIT_CRITICAL(flags); local_irq_restore(flags);
} }
} }
...@@ -266,7 +244,7 @@ void local_flush_tlb_one(unsigned long page) ...@@ -266,7 +244,7 @@ void local_flush_tlb_one(unsigned long page)
unsigned long flags; unsigned long flags;
int oldpid, idx; int oldpid, idx;
ENTER_CRITICAL(flags); local_irq_save(flags);
oldpid = read_c0_entryhi(); oldpid = read_c0_entryhi();
page &= (PAGE_MASK << 1); page &= (PAGE_MASK << 1);
write_c0_entryhi(page); write_c0_entryhi(page);
...@@ -285,7 +263,7 @@ void local_flush_tlb_one(unsigned long page) ...@@ -285,7 +263,7 @@ void local_flush_tlb_one(unsigned long page)
} }
write_c0_entryhi(oldpid); write_c0_entryhi(oldpid);
flush_itlb(); flush_itlb();
EXIT_CRITICAL(flags); local_irq_restore(flags);
} }
/* /*
...@@ -308,7 +286,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) ...@@ -308,7 +286,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
if (current->active_mm != vma->vm_mm) if (current->active_mm != vma->vm_mm)
return; return;
ENTER_CRITICAL(flags); local_irq_save(flags);
pid = read_c0_entryhi() & ASID_MASK; pid = read_c0_entryhi() & ASID_MASK;
address &= (PAGE_MASK << 1); address &= (PAGE_MASK << 1);
...@@ -358,7 +336,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte) ...@@ -358,7 +336,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
} }
tlbw_use_hazard(); tlbw_use_hazard();
flush_itlb_vm(vma); flush_itlb_vm(vma);
EXIT_CRITICAL(flags); local_irq_restore(flags);
} }
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
...@@ -369,7 +347,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, ...@@ -369,7 +347,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long old_pagemask; unsigned long old_pagemask;
unsigned long old_ctx; unsigned long old_ctx;
ENTER_CRITICAL(flags); local_irq_save(flags);
/* Save old context and create impossible VPN2 value */ /* Save old context and create impossible VPN2 value */
old_ctx = read_c0_entryhi(); old_ctx = read_c0_entryhi();
old_pagemask = read_c0_pagemask(); old_pagemask = read_c0_pagemask();
...@@ -389,7 +367,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1, ...@@ -389,7 +367,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
tlbw_use_hazard(); /* What is the hazard here? */ tlbw_use_hazard(); /* What is the hazard here? */
write_c0_pagemask(old_pagemask); write_c0_pagemask(old_pagemask);
local_flush_tlb_all(); local_flush_tlb_all();
EXIT_CRITICAL(flags); local_irq_restore(flags);
} }
#ifdef CONFIG_TRANSPARENT_HUGEPAGE #ifdef CONFIG_TRANSPARENT_HUGEPAGE
...@@ -399,13 +377,13 @@ int __init has_transparent_hugepage(void) ...@@ -399,13 +377,13 @@ int __init has_transparent_hugepage(void)
unsigned int mask; unsigned int mask;
unsigned long flags; unsigned long flags;
ENTER_CRITICAL(flags); local_irq_save(flags);
write_c0_pagemask(PM_HUGE_MASK); write_c0_pagemask(PM_HUGE_MASK);
back_to_back_c0_hazard(); back_to_back_c0_hazard();
mask = read_c0_pagemask(); mask = read_c0_pagemask();
write_c0_pagemask(PM_DEFAULT_MASK); write_c0_pagemask(PM_DEFAULT_MASK);
EXIT_CRITICAL(flags); local_irq_restore(flags);
return mask == PM_HUGE_MASK; return mask == PM_HUGE_MASK;
} }
......
...@@ -8,6 +8,3 @@ ...@@ -8,6 +8,3 @@
obj-y := malta-amon.o malta-display.o malta-init.o \ obj-y := malta-amon.o malta-display.o malta-init.o \
malta-int.o malta-memory.o malta-platform.o \ malta-int.o malta-memory.o malta-platform.o \
malta-reset.o malta-setup.o malta-time.o malta-reset.o malta-setup.o malta-time.o
# FIXME FIXME FIXME
obj-$(CONFIG_MIPS_MT_SMTC) += malta-smtc.o
...@@ -116,8 +116,6 @@ phys_t mips_cpc_default_phys_base(void) ...@@ -116,8 +116,6 @@ phys_t mips_cpc_default_phys_base(void)
return CPC_BASE_ADDR; return CPC_BASE_ADDR;
} }
extern struct plat_smp_ops msmtc_smp_ops;
void __init prom_init(void) void __init prom_init(void)
{ {
mips_display_message("LINUX"); mips_display_message("LINUX");
...@@ -304,8 +302,4 @@ void __init prom_init(void) ...@@ -304,8 +302,4 @@ void __init prom_init(void)
return; return;
if (!register_vsmp_smp_ops()) if (!register_vsmp_smp_ops())
return; return;
#ifdef CONFIG_MIPS_MT_SMTC
register_smp_ops(&msmtc_smp_ops);
#endif
} }
...@@ -504,28 +504,9 @@ void __init arch_init_irq(void) ...@@ -504,28 +504,9 @@ void __init arch_init_irq(void)
} else if (cpu_has_vint) { } else if (cpu_has_vint) {
set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch); set_vi_handler(MIPSCPU_INT_I8259A, malta_hw0_irqdispatch);
set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch); set_vi_handler(MIPSCPU_INT_COREHI, corehi_irqdispatch);
#ifdef CONFIG_MIPS_MT_SMTC
setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq,
(0x100 << MIPSCPU_INT_I8259A));
setup_irq_smtc(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
&corehi_irqaction, (0x100 << MIPSCPU_INT_COREHI));
/*
* Temporary hack to ensure that the subsidiary device
* interrupts coming in via the i8259A, but associated
* with low IRQ numbers, will restore the Status.IM
* value associated with the i8259A.
*/
{
int i;
for (i = 0; i < 16; i++)
irq_hwmask[i] = (0x100 << MIPSCPU_INT_I8259A);
}
#else /* Not SMTC */
setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
&corehi_irqaction); &corehi_irqaction);
#endif /* CONFIG_MIPS_MT_SMTC */
} else { } else {
setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq); setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_I8259A, &i8259irq);
setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI, setup_irq(MIPS_CPU_IRQ_BASE+MIPSCPU_INT_COREHI,
......
...@@ -77,11 +77,7 @@ const char *get_system_type(void) ...@@ -77,11 +77,7 @@ const char *get_system_type(void)
return "MIPS Malta"; return "MIPS Malta";
} }
#if defined(CONFIG_MIPS_MT_SMTC)
const char display_string[] = " SMTC LINUX ON MALTA ";
#else
const char display_string[] = " LINUX ON MALTA "; const char display_string[] = " LINUX ON MALTA ";
#endif /* CONFIG_MIPS_MT_SMTC */
#ifdef CONFIG_BLK_DEV_FD #ifdef CONFIG_BLK_DEV_FD
static void __init fd_activate(void) static void __init fd_activate(void)
......
/*
* Malta Platform-specific hooks for SMP operation
*/
#include <linux/irq.h>
#include <linux/init.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
/* VPE/SMP Prototype implements platform interfaces directly */
/*
* Cause the specified action to be performed on a targeted "CPU"
*/
static void msmtc_send_ipi_single(int cpu, unsigned int action)
{
/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}
static void msmtc_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
msmtc_send_ipi_single(i, action);
}
/*
* Post-config but pre-boot cleanup entry point
*/
static void msmtc_init_secondary(void)
{
int myvpe;
/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
myvpe = read_c0_tcbind() & TCBIND_CURVPE;
if (myvpe != 0) {
/* Ideally, this should be done only once per VPE, but... */
clear_c0_status(ST0_IM);
set_c0_status((0x100 << cp0_compare_irq)
| (0x100 << MIPS_CPU_IPI_IRQ));
if (cp0_perfcount_irq >= 0)
set_c0_status(0x100 << cp0_perfcount_irq);
}
smtc_init_secondary();
}
/*
* Platform "CPU" startup hook
*/
static void msmtc_boot_secondary(int cpu, struct task_struct *idle)
{
smtc_boot_secondary(cpu, idle);
}
/*
* SMP initialization finalization entry point
*/
static void msmtc_smp_finish(void)
{
smtc_smp_finish();
}
/*
* Hook for after all CPUs are online
*/
static void msmtc_cpus_done(void)
{
}
/*
* Platform SMP pre-initialization
*
* As noted above, we can assume a single CPU for now
* but it may be multithreaded.
*/
static void __init msmtc_smp_setup(void)
{
/*
* we won't get the definitive value until
* we've run smtc_prepare_cpus later, but
* we would appear to need an upper bound now.
*/
smp_num_siblings = smtc_build_cpu_map(0);
}
static void __init msmtc_prepare_cpus(unsigned int max_cpus)
{
smtc_prepare_cpus(max_cpus);
}
struct plat_smp_ops msmtc_smp_ops = {
.send_ipi_single = msmtc_send_ipi_single,
.send_ipi_mask = msmtc_send_ipi_mask,
.init_secondary = msmtc_init_secondary,
.smp_finish = msmtc_smp_finish,
.cpus_done = msmtc_cpus_done,
.boot_secondary = msmtc_boot_secondary,
.smp_setup = msmtc_smp_setup,
.prepare_cpus = msmtc_prepare_cpus,
};
#ifdef CONFIG_MIPS_MT_SMTC_IRQAFF
/*
* IRQ affinity hook
*/
int plat_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity,
bool force)
{
cpumask_t tmask;
int cpu = 0;
void smtc_set_irq_affinity(unsigned int irq, cpumask_t aff);
/*
* On the legacy Malta development board, all I/O interrupts
* are routed through the 8259 and combined in a single signal
* to the CPU daughterboard, and on the CoreFPGA2/3 34K models,
* that signal is brought to IP2 of both VPEs. To avoid racing
* concurrent interrupt service events, IP2 is enabled only on
* one VPE, by convention VPE0. So long as no bits are ever
* cleared in the affinity mask, there will never be any
* interrupt forwarding. But as soon as a program or operator
* sets affinity for one of the related IRQs, we need to make
* sure that we don't ever try to forward across the VPE boundary,
* at least not until we engineer a system where the interrupt
* _ack() or _end() function can somehow know that it corresponds
* to an interrupt taken on another VPE, and perform the appropriate
* restoration of Status.IM state using MFTR/MTTR instead of the
* normal local behavior. We also ensure that no attempt will
* be made to forward to an offline "CPU".
*/
cpumask_copy(&tmask, affinity);
for_each_cpu(cpu, affinity) {
if ((cpu_data[cpu].vpe_id != 0) || !cpu_online(cpu))
cpu_clear(cpu, tmask);
}
cpumask_copy(d->affinity, &tmask);
if (cpus_empty(tmask))
/*
* We could restore a default mask here, but the
* runtime code can anyway deal with the null set
*/
printk(KERN_WARNING
"IRQ affinity leaves no legal CPU for IRQ %d\n", d->irq);
/* Do any generic SMTC IRQ affinity setup */
smtc_set_irq_affinity(d->irq, tmask);
return IRQ_SET_MASK_OK_NOCOPY;
}
#endif /* CONFIG_MIPS_MT_SMTC_IRQAFF */
...@@ -10,4 +10,3 @@ obj-$(CONFIG_PCI) += msp_pci.o ...@@ -10,4 +10,3 @@ obj-$(CONFIG_PCI) += msp_pci.o
obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o obj-$(CONFIG_MSP_HAS_MAC) += msp_eth.o
obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o obj-$(CONFIG_MSP_HAS_USB) += msp_usb.o
obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o obj-$(CONFIG_MIPS_MT_SMP) += msp_smp.o
obj-$(CONFIG_MIPS_MT_SMTC) += msp_smtc.o
...@@ -32,7 +32,7 @@ extern void msp_vsmp_int_init(void); ...@@ -32,7 +32,7 @@ extern void msp_vsmp_int_init(void);
/* vectored interrupt implementation */ /* vectored interrupt implementation */
/* SW0/1 interrupts are used for SMP/SMTC */ /* SW0/1 interrupts are used for SMP */
static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); } static inline void mac0_int_dispatch(void) { do_IRQ(MSP_INT_MAC0); }
static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); } static inline void mac1_int_dispatch(void) { do_IRQ(MSP_INT_MAC1); }
static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); } static inline void mac2_int_dispatch(void) { do_IRQ(MSP_INT_SAR); }
...@@ -138,14 +138,6 @@ void __init arch_init_irq(void) ...@@ -138,14 +138,6 @@ void __init arch_init_irq(void)
set_vi_handler(MSP_INT_SEC, sec_int_dispatch); set_vi_handler(MSP_INT_SEC, sec_int_dispatch);
#ifdef CONFIG_MIPS_MT_SMP #ifdef CONFIG_MIPS_MT_SMP
msp_vsmp_int_init(); msp_vsmp_int_init();
#elif defined CONFIG_MIPS_MT_SMTC
/*Set hwmask for all platform devices */
irq_hwmask[MSP_INT_MAC0] = C_IRQ0;
irq_hwmask[MSP_INT_MAC1] = C_IRQ1;
irq_hwmask[MSP_INT_USB] = C_IRQ2;
irq_hwmask[MSP_INT_SAR] = C_IRQ3;
irq_hwmask[MSP_INT_SEC] = C_IRQ5;
#endif /* CONFIG_MIPS_MT_SMP */ #endif /* CONFIG_MIPS_MT_SMP */
#endif /* CONFIG_MIPS_MT */ #endif /* CONFIG_MIPS_MT */
/* setup the cascaded interrupts */ /* setup the cascaded interrupts */
...@@ -153,8 +145,10 @@ void __init arch_init_irq(void) ...@@ -153,8 +145,10 @@ void __init arch_init_irq(void)
setup_irq(MSP_INT_PER, &per_cascade_msp); setup_irq(MSP_INT_PER, &per_cascade_msp);
#else #else
/* setup the 2nd-level SLP register based interrupt controller */ /*
/* VSMP /SMTC support support is not enabled for SLP */ * Setup the 2nd-level SLP register based interrupt controller.
* VSMP support is not enabled for SLP.
*/
msp_slp_irq_init(); msp_slp_irq_init();
/* setup the cascaded SLP/PER interrupts */ /* setup the cascaded SLP/PER interrupts */
......
...@@ -120,10 +120,9 @@ static void msp_cic_irq_ack(struct irq_data *d) ...@@ -120,10 +120,9 @@ static void msp_cic_irq_ack(struct irq_data *d)
* hurt for the others * hurt for the others
*/ */
*CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE)); *CIC_STS_REG = (1 << (d->irq - MSP_CIC_INTBASE));
smtc_im_ack_irq(d->irq);
} }
/*Note: Limiting to VSMP . Not tested in SMTC */ /* Note: Limiting to VSMP. */
#ifdef CONFIG_MIPS_MT_SMP #ifdef CONFIG_MIPS_MT_SMP
static int msp_cic_irq_set_affinity(struct irq_data *d, static int msp_cic_irq_set_affinity(struct irq_data *d,
...@@ -183,10 +182,6 @@ void __init msp_cic_irq_init(void) ...@@ -183,10 +182,6 @@ void __init msp_cic_irq_init(void)
for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) { for (i = MSP_CIC_INTBASE ; i < MSP_CIC_INTBASE + 32 ; i++) {
irq_set_chip_and_handler(i, &msp_cic_irq_controller, irq_set_chip_and_handler(i, &msp_cic_irq_controller,
handle_level_irq); handle_level_irq);
#ifdef CONFIG_MIPS_MT_SMTC
/* Mask of CIC interrupt */
irq_hwmask[i] = C_IRQ4;
#endif
} }
/* Initialize the PER interrupt sub-system */ /* Initialize the PER interrupt sub-system */
......
...@@ -113,9 +113,6 @@ void __init msp_per_irq_init(void) ...@@ -113,9 +113,6 @@ void __init msp_per_irq_init(void)
/* initialize all the IRQ descriptors */ /* initialize all the IRQ descriptors */
for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) { for (i = MSP_PER_INTBASE; i < MSP_PER_INTBASE + 32; i++) {
irq_set_chip(i, &msp_per_irq_controller); irq_set_chip(i, &msp_per_irq_controller);
#ifdef CONFIG_MIPS_MT_SMTC
irq_hwmask[i] = C_IRQ4;
#endif
} }
} }
......
...@@ -147,8 +147,6 @@ void __init plat_mem_setup(void) ...@@ -147,8 +147,6 @@ void __init plat_mem_setup(void)
pm_power_off = msp_power_off; pm_power_off = msp_power_off;
} }
extern struct plat_smp_ops msp_smtc_smp_ops;
void __init prom_init(void) void __init prom_init(void)
{ {
unsigned long family; unsigned long family;
...@@ -229,9 +227,5 @@ void __init prom_init(void) ...@@ -229,9 +227,5 @@ void __init prom_init(void)
*/ */
msp_serial_setup(); msp_serial_setup();
if (register_vsmp_smp_ops()) { register_vsmp_smp_ops();
#ifdef CONFIG_MIPS_MT_SMTC
register_smp_ops(&msp_smtc_smp_ops);
#endif
}
} }
/*
* MSP71xx Platform-specific hooks for SMP operation
*/
#include <linux/irq.h>
#include <linux/init.h>
#include <asm/mipsmtregs.h>
#include <asm/mipsregs.h>
#include <asm/smtc.h>
#include <asm/smtc_ipi.h>
/* VPE/SMP Prototype implements platform interfaces directly */
/*
* Cause the specified action to be performed on a targeted "CPU"
*/
static void msp_smtc_send_ipi_single(int cpu, unsigned int action)
{
/* "CPU" may be TC of same VPE, VPE of same CPU, or different CPU */
smtc_send_ipi(cpu, LINUX_SMP_IPI, action);
}
static void msp_smtc_send_ipi_mask(const struct cpumask *mask,
unsigned int action)
{
unsigned int i;
for_each_cpu(i, mask)
msp_smtc_send_ipi_single(i, action);
}
/*
* Post-config but pre-boot cleanup entry point
*/
static void msp_smtc_init_secondary(void)
{
int myvpe;
/* Don't enable Malta I/O interrupts (IP2) for secondary VPEs */
myvpe = read_c0_tcbind() & TCBIND_CURVPE;
if (myvpe > 0)
change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
STATUSF_IP6 | STATUSF_IP7);
smtc_init_secondary();
}
/*
* Platform "CPU" startup hook
*/
static void msp_smtc_boot_secondary(int cpu, struct task_struct *idle)
{
smtc_boot_secondary(cpu, idle);
}
/*
* SMP initialization finalization entry point
*/
static void msp_smtc_smp_finish(void)
{
smtc_smp_finish();
}
/*
* Hook for after all CPUs are online
*/
static void msp_smtc_cpus_done(void)
{
}
/*
* Platform SMP pre-initialization
*
* As noted above, we can assume a single CPU for now
* but it may be multithreaded.
*/
static void __init msp_smtc_smp_setup(void)
{
/*
* we won't get the definitive value until
* we've run smtc_prepare_cpus later, but
* we would appear to need an upper bound now.
*/
if (read_c0_config3() & (1 << 2))
smp_num_siblings = smtc_build_cpu_map(0);
}
static void __init msp_smtc_prepare_cpus(unsigned int max_cpus)
{
smtc_prepare_cpus(max_cpus);
}
struct plat_smp_ops msp_smtc_smp_ops = {
.send_ipi_single = msp_smtc_send_ipi_single,
.send_ipi_mask = msp_smtc_send_ipi_mask,
.init_secondary = msp_smtc_init_secondary,
.smp_finish = msp_smtc_smp_finish,
.cpus_done = msp_smtc_cpus_done,
.boot_secondary = msp_smtc_boot_secondary,
.smp_setup = msp_smtc_smp_setup,
.prepare_cpus = msp_smtc_prepare_cpus,
};