Commit 9b4311ee authored by Linus Torvalds
parents 2949ccf9 0647d8cf
......@@ -99,7 +99,7 @@ CONFIG_ACPI_DEALLOCATE_IRQ=y
# Firmware Drivers
#
CONFIG_EFI_VARS=y
# CONFIG_EFI_PCDP is not set
CONFIG_EFI_PCDP=y
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
......@@ -650,7 +650,7 @@ CONFIG_MMTIMER=y
#
# Console display driver support
#
# CONFIG_VGA_CONSOLE is not set
CONFIG_VGA_CONSOLE=y
CONFIG_DUMMY_CONSOLE=y
#
......
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.12-20050621
# Tue Jun 21 14:03:24 2005
# Linux kernel version: 2.6.13-rc1-20050629
# Wed Jun 29 15:28:12 2005
#
#
......@@ -80,18 +80,29 @@ CONFIG_MCKINLEY=y
# CONFIG_IA64_PAGE_SIZE_8KB is not set
CONFIG_IA64_PAGE_SIZE_16KB=y
# CONFIG_IA64_PAGE_SIZE_64KB is not set
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
CONFIG_IA64_L1_CACHE_SHIFT=7
# CONFIG_NUMA is not set
CONFIG_VIRTUAL_MEM_MAP=y
CONFIG_HOLES_IN_ZONE=y
CONFIG_IA64_CYCLONE=y
CONFIG_IOSAPIC=y
# CONFIG_IA64_SGI_SN_XP is not set
CONFIG_FORCE_MAX_ZONEORDER=18
CONFIG_SMP=y
CONFIG_NR_CPUS=4
CONFIG_HOTPLUG_CPU=y
# CONFIG_SCHED_SMT is not set
# CONFIG_PREEMPT is not set
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_FLATMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_FLATMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_HAVE_DEC_LOCK=y
CONFIG_IA32_SUPPORT=y
CONFIG_COMPAT=y
......@@ -257,6 +268,7 @@ CONFIG_BLK_DEV_CMD64X=y
# CONFIG_BLK_DEV_HPT366 is not set
# CONFIG_BLK_DEV_SC1200 is not set
CONFIG_BLK_DEV_PIIX=y
# CONFIG_BLK_DEV_IT821X is not set
# CONFIG_BLK_DEV_NS87415 is not set
# CONFIG_BLK_DEV_PDC202XX_OLD is not set
# CONFIG_BLK_DEV_PDC202XX_NEW is not set
......@@ -395,6 +407,7 @@ CONFIG_UNIX=y
CONFIG_INET=y
CONFIG_IP_MULTICAST=y
# CONFIG_IP_ADVANCED_ROUTER is not set
CONFIG_IP_FIB_HASH=y
# CONFIG_IP_PNP is not set
# CONFIG_NET_IPIP is not set
# CONFIG_NET_IPGRE is not set
......@@ -407,6 +420,8 @@ CONFIG_SYN_COOKIES=y
# CONFIG_INET_TUNNEL is not set
CONFIG_IP_TCPDIAG=y
# CONFIG_IP_TCPDIAG_IPV6 is not set
# CONFIG_TCP_CONG_ADVANCED is not set
CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
# CONFIG_NETFILTER is not set
......@@ -598,9 +613,7 @@ CONFIG_GAMEPORT=m
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
# CONFIG_GAMEPORT_EMU10K1 is not set
# CONFIG_GAMEPORT_VORTEX is not set
# CONFIG_GAMEPORT_FM801 is not set
# CONFIG_GAMEPORT_CS461X is not set
#
# Character devices
......@@ -629,7 +642,6 @@ CONFIG_SERIAL_8250_NR_UARTS=6
CONFIG_SERIAL_8250_EXTENDED=y
CONFIG_SERIAL_8250_SHARE_IRQ=y
# CONFIG_SERIAL_8250_DETECT_IRQ is not set
# CONFIG_SERIAL_8250_MULTIPORT is not set
# CONFIG_SERIAL_8250_RSA is not set
#
......@@ -743,6 +755,7 @@ CONFIG_USB_DEVICEFS=y
CONFIG_USB_EHCI_HCD=m
# CONFIG_USB_EHCI_SPLIT_ISO is not set
# CONFIG_USB_EHCI_ROOT_HUB_TT is not set
# CONFIG_USB_ISP116X_HCD is not set
CONFIG_USB_OHCI_HCD=m
# CONFIG_USB_OHCI_BIG_ENDIAN is not set
CONFIG_USB_OHCI_LITTLE_ENDIAN=y
......@@ -779,9 +792,11 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_HIDDEV is not set
# CONFIG_USB_AIPTEK is not set
# CONFIG_USB_WACOM is not set
# CONFIG_USB_ACECAD is not set
# CONFIG_USB_KBTAB is not set
# CONFIG_USB_POWERMATE is not set
# CONFIG_USB_MTOUCH is not set
# CONFIG_USB_ITMTOUCH is not set
# CONFIG_USB_EGALAX is not set
# CONFIG_USB_XPAD is not set
# CONFIG_USB_ATI_REMOTE is not set
......@@ -838,7 +853,7 @@ CONFIG_USB_HIDINPUT=y
# CONFIG_USB_TEST is not set
#
# USB ATM/DSL drivers
# USB DSL modem support
#
#
......@@ -856,6 +871,10 @@ CONFIG_USB_HIDINPUT=y
#
# CONFIG_INFINIBAND is not set
#
# SN Devices
#
#
# File systems
#
......@@ -863,6 +882,7 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT2_FS_POSIX_ACL=y
CONFIG_EXT2_FS_SECURITY=y
# CONFIG_EXT2_FS_XIP is not set
CONFIG_EXT3_FS=y
CONFIG_EXT3_FS_XATTR=y
CONFIG_EXT3_FS_POSIX_ACL=y
......@@ -922,7 +942,6 @@ CONFIG_NTFS_FS=m
CONFIG_PROC_FS=y
CONFIG_PROC_KCORE=y
CONFIG_SYSFS=y
# CONFIG_DEVFS_FS is not set
# CONFIG_DEVPTS_FS_XATTR is not set
CONFIG_TMPFS=y
CONFIG_TMPFS_XATTR=y
......@@ -953,15 +972,18 @@ CONFIG_RAMFS=y
#
CONFIG_NFS_FS=m
CONFIG_NFS_V3=y
# CONFIG_NFS_V3_ACL is not set
CONFIG_NFS_V4=y
CONFIG_NFS_DIRECTIO=y
CONFIG_NFSD=m
CONFIG_NFSD_V3=y
# CONFIG_NFSD_V3_ACL is not set
CONFIG_NFSD_V4=y
CONFIG_NFSD_TCP=y
CONFIG_LOCKD=m
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
CONFIG_NFS_COMMON=y
CONFIG_SUNRPC=m
CONFIG_SUNRPC_GSS=m
CONFIG_RPCSEC_GSS_KRB5=m
......@@ -1069,6 +1091,7 @@ CONFIG_LOG_BUF_SHIFT=20
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_FS is not set
# CONFIG_KPROBES is not set
CONFIG_IA64_GRANULE_16MB=y
# CONFIG_IA64_GRANULE_64MB is not set
# CONFIG_IA64_PRINT_HAZARDS is not set
......@@ -1090,7 +1113,7 @@ CONFIG_CRYPTO=y
# CONFIG_CRYPTO_HMAC is not set
# CONFIG_CRYPTO_NULL is not set
# CONFIG_CRYPTO_MD4 is not set
CONFIG_CRYPTO_MD5=m
CONFIG_CRYPTO_MD5=y
# CONFIG_CRYPTO_SHA1 is not set
# CONFIG_CRYPTO_SHA256 is not set
# CONFIG_CRYPTO_SHA512 is not set
......
......@@ -156,10 +156,13 @@
*/
#define DELAYED_RESOURCE_CNT 64
#define PCI_DEVICE_ID_HP_SX2000_IOC 0x12ec
#define ZX1_IOC_ID ((PCI_DEVICE_ID_HP_ZX1_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX2_IOC_ID ((PCI_DEVICE_ID_HP_ZX2_IOC << 16) | PCI_VENDOR_ID_HP)
#define REO_IOC_ID ((PCI_DEVICE_ID_HP_REO_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX1000_IOC_ID ((PCI_DEVICE_ID_HP_SX1000_IOC << 16) | PCI_VENDOR_ID_HP)
#define SX2000_IOC_ID ((PCI_DEVICE_ID_HP_SX2000_IOC << 16) | PCI_VENDOR_ID_HP)
#define ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
......@@ -1726,6 +1729,7 @@ static struct ioc_iommu ioc_iommu_info[] __initdata = {
{ ZX1_IOC_ID, "zx1", ioc_zx1_init },
{ ZX2_IOC_ID, "zx2", NULL },
{ SX1000_IOC_ID, "sx1000", NULL },
{ SX2000_IOC_ID, "sx2000", NULL },
};
static struct ioc * __init
......
......@@ -30,6 +30,7 @@
#include <linux/module.h>
#include <linux/serial.h>
#include <linux/serialP.h>
#include <linux/sysrq.h>
#include <asm/irq.h>
#include <asm/hw_irq.h>
......@@ -149,12 +150,17 @@ static void receive_chars(struct tty_struct *tty, struct pt_regs *regs)
seen_esc = 2;
continue;
} else if ( seen_esc == 2 ) {
if ( ch == 'P' ) show_state(); /* F1 key */
#ifdef CONFIG_KDB
if ( ch == 'S' )
kdb(KDB_REASON_KEYBOARD, 0, (kdb_eframe_t) regs);
if ( ch == 'P' ) /* F1 */
show_state();
#ifdef CONFIG_MAGIC_SYSRQ
if ( ch == 'S' ) { /* F4 */
do
ch = ia64_ssc(0, 0, 0, 0,
SSC_GETCHAR);
while (!ch);
handle_sysrq(ch, regs, NULL);
}
#endif
seen_esc = 0;
continue;
}
......
......@@ -470,18 +470,6 @@ ENTRY(load_switch_stack)
br.cond.sptk.many b7
END(load_switch_stack)
GLOBAL_ENTRY(__ia64_syscall)
.regstk 6,0,0,0
mov r15=in5 // put syscall number in place
break __BREAK_SYSCALL
movl r2=errno
cmp.eq p6,p7=-1,r10
;;
(p6) st4 [r2]=r8
(p6) mov r8=-1
br.ret.sptk.many rp
END(__ia64_syscall)
GLOBAL_ENTRY(execve)
mov r15=__NR_execve // put syscall number in place
break __BREAK_SYSCALL
......@@ -637,7 +625,7 @@ END(ia64_ret_from_syscall)
* r8-r11: restored (syscall return value(s))
* r12: restored (user-level stack pointer)
* r13: restored (user-level thread pointer)
* r14: cleared
* r14: set to __kernel_syscall_via_epc
* r15: restored (syscall #)
* r16-r17: cleared
* r18: user-level b6
......@@ -658,7 +646,7 @@ END(ia64_ret_from_syscall)
* pr: restored (user-level pr)
* b0: restored (user-level rp)
* b6: restored
* b7: cleared
* b7: set to __kernel_syscall_via_epc
* ar.unat: restored (user-level ar.unat)
* ar.pfs: restored (user-level ar.pfs)
* ar.rsc: restored (user-level ar.rsc)
......@@ -704,72 +692,79 @@ ENTRY(ia64_leave_syscall)
;;
(p6) ld4 r31=[r18] // load current_thread_info()->flags
ld8 r19=[r2],PT(B6)-PT(LOADRS) // load ar.rsc value for "loadrs"
mov b7=r0 // clear b7
nop.i 0
;;
ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
mov r16=ar.bsp // M2 get existing backing store pointer
ld8 r18=[r2],PT(R9)-PT(B6) // load b6
(p6) and r15=TIF_WORK_MASK,r31 // any work other than TIF_SYSCALL_TRACE?
;;
mov r16=ar.bsp // M2 get existing backing store pointer
ld8 r23=[r3],PT(R11)-PT(AR_BSPSTORE) // load ar.bspstore (may be garbage)
(p6) cmp4.ne.unc p6,p0=r15, r0 // any special work pending?
(p6) br.cond.spnt .work_pending_syscall
;;
// start restoring the state saved on the kernel stack (struct pt_regs):
ld8 r9=[r2],PT(CR_IPSR)-PT(R9)
ld8 r11=[r3],PT(CR_IIP)-PT(R11)
mov f6=f0 // clear f6
(pNonSys) break 0 // bug check: we shouldn't be here if pNonSys is TRUE!
;;
invala // M0|1 invalidate ALAT
rsm psr.i | psr.ic // M2 initiate turning off of interrupt and interruption collection
mov f9=f0 // clear f9
rsm psr.i | psr.ic // M2 turn off interrupts and interruption collection
cmp.eq p9,p0=r0,r0 // A set p9 to indicate that we should restore cr.ifs
ld8 r29=[r2],16 // load cr.ipsr
ld8 r28=[r3],16 // load cr.iip
mov f8=f0 // clear f8
ld8 r29=[r2],16 // M0|1 load cr.ipsr
ld8 r28=[r3],16 // M0|1 load cr.iip
mov r22=r0 // A clear r22
;;
ld8 r30=[r2],16 // M0|1 load cr.ifs
ld8 r25=[r3],16 // M0|1 load ar.unat
cmp.eq p9,p0=r0,r0 // set p9 to indicate that we should restore cr.ifs
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
;;
ld8 r26=[r2],PT(B0)-PT(AR_PFS) // M0|1 load ar.pfs
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
mov f10=f0 // clear f10
(pKStk) mov r22=psr // M2 read PSR now that interrupts are disabled
nop 0
;;
ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // load b0
ld8 r27=[r3],PT(PR)-PT(AR_RSC) // load ar.rsc
mov f11=f0 // clear f11
ld8 r21=[r2],PT(AR_RNAT)-PT(B0) // M0|1 load b0
ld8 r27=[r3],PT(PR)-PT(AR_RSC) // M0|1 load ar.rsc
mov f6=f0 // F clear f6
;;
ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // load ar.rnat (may be garbage)
ld8 r31=[r3],PT(R1)-PT(PR) // load predicates
(pUStk) add r14=IA64_TASK_THREAD_ON_USTACK_OFFSET,r13
ld8 r24=[r2],PT(AR_FPSR)-PT(AR_RNAT) // M0|1 load ar.rnat (may be garbage)
ld8 r31=[r3],PT(R1)-PT(PR) // M0|1 load predicates
mov f7=f0 // F clear f7
;;
ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // load ar.fpsr
ld8.fill r1=[r3],16 // load r1
(pUStk) mov r17=1
ld8 r20=[r2],PT(R12)-PT(AR_FPSR) // M0|1 load ar.fpsr
ld8.fill r1=[r3],16 // M0|1 load r1
(pUStk) mov r17=1 // A
;;
srlz.d // M0 ensure interruption collection is off
ld8.fill r13=[r3],16
mov f7=f0 // clear f7
(pUStk) st1 [r14]=r17 // M2|3
ld8.fill r13=[r3],16 // M0|1
mov f8=f0 // F clear f8
;;
ld8.fill r12=[r2] // restore r12 (sp)
mov.m ar.ssd=r0 // M2 clear ar.ssd
mov r22=r0 // clear r22
ld8.fill r12=[r2] // M0|1 restore r12 (sp)
ld8.fill r15=[r3] // M0|1 restore r15
mov b6=r18 // I0 restore b6
ld8.fill r15=[r3] // restore r15
(pUStk) st1 [r14]=r17
addl r3=THIS_CPU(ia64_phys_stacked_size_p8),r0
addl r17=THIS_CPU(ia64_phys_stacked_size_p8),r0 // A
mov f9=f0 // F clear f9
(pKStk) br.cond.dpnt.many skip_rbs_switch // B
srlz.d // M0 ensure interruption collection is off (for cover)
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
cover // B add current frame into dirty partition & set cr.ifs
;;
(pUStk) ld4 r17=[r3] // r17 = cpu_data->phys_stacked_size_p8
mov.m ar.csd=r0 // M2 clear ar.csd
mov b6=r18 // I0 restore b6
(pUStk) ld4 r17=[r17] // M0|1 r17 = cpu_data->phys_stacked_size_p8
mov r19=ar.bsp // M2 get new backing store pointer
mov f10=f0 // F clear f10
nop.m 0
movl r14=__kernel_syscall_via_epc // X
;;
mov r14=r0 // clear r14
shr.u r18=r19,16 // I0|1 get byte size of existing "dirty" partition
(pKStk) br.cond.dpnt.many skip_rbs_switch
mov.m ar.csd=r0 // M2 clear ar.csd
mov.m ar.ccv=r0 // M2 clear ar.ccv
mov b7=r14 // I0 clear b7 (hint with __kernel_syscall_via_epc)
mov.m ar.ccv=r0 // clear ar.ccv
(pNonSys) br.cond.dpnt.many dont_preserve_current_frame
br.cond.sptk.many rbs_switch
mov.m ar.ssd=r0 // M2 clear ar.ssd
mov f11=f0 // F clear f11
br.cond.sptk.many rbs_switch // B
END(ia64_leave_syscall)
#ifdef CONFIG_IA32_SUPPORT
......@@ -885,7 +880,7 @@ GLOBAL_ENTRY(ia64_leave_kernel)
ldf.fill f7=[r2],PT(F11)-PT(F7)
ldf.fill f8=[r3],32
;;
srlz.i // ensure interruption collection is off
srlz.d // ensure that inter. collection is off (VHPT is don't care, since text is pinned)
mov ar.ccv=r15
;;
ldf.fill f11=[r2]
......@@ -945,11 +940,10 @@ GLOBAL_ENTRY(ia64_leave_kernel)
* NOTE: alloc, loadrs, and cover can't be predicated.
*/
(pNonSys) br.cond.dpnt dont_preserve_current_frame
rbs_switch:
cover // add current frame into dirty partition and set cr.ifs
;;
mov r19=ar.bsp // get new backing store pointer
rbs_switch:
sub r16=r16,r18 // krbs = old bsp - size of dirty partition
cmp.ne p9,p0=r0,r0 // clear p9 to skip restore of cr.ifs
;;
......@@ -1024,14 +1018,14 @@ rse_clear_invalid:
mov loc5=0
mov loc6=0
mov loc7=0
(pRecurse) br.call.sptk.few b0=rse_clear_invalid
(pRecurse) br.call.dptk.few b0=rse_clear_invalid
;;
mov loc8=0
mov loc9=0
cmp.ne pReturn,p0=r0,in1 // if recursion count != 0, we need to do a br.ret
mov loc10=0
mov loc11=0
(pReturn) br.ret.sptk.many b0
(pReturn) br.ret.dptk.many b0
#endif /* !CONFIG_ITANIUM */
# undef pRecurse
# undef pReturn
......
......@@ -531,93 +531,114 @@ GLOBAL_ENTRY(fsys_bubble_down)
.altrp b6
.body
/*
* We get here for syscalls that don't have a lightweight handler. For those, we
* need to bubble down into the kernel and that requires setting up a minimal
* pt_regs structure, and initializing the CPU state more or less as if an
* interruption had occurred. To make syscall-restarts work, we setup pt_regs
* such that cr_iip points to the second instruction in syscall_via_break.
* Decrementing the IP hence will restart the syscall via break and not
* decrementing IP will return us to the caller, as usual. Note that we preserve
* the value of psr.pp rather than initializing it from dcr.pp. This makes it
* possible to distinguish fsyscall execution from other privileged execution.
* We get here for syscalls that don't have a lightweight
* handler. For those, we need to bubble down into the kernel
* and that requires setting up a minimal pt_regs structure,
* and initializing the CPU state more or less as if an
* interruption had occurred. To make syscall-restarts work,
* we setup pt_regs such that cr_iip points to the second
* instruction in syscall_via_break. Decrementing the IP
* hence will restart the syscall via break and not
* decrementing IP will return us to the caller, as usual.
* Note that we preserve the value of psr.pp rather than
* initializing it from dcr.pp. This makes it possible to
* distinguish fsyscall execution from other privileged
* execution.
*
* On entry:
* - normal fsyscall handler register usage, except that we also have:
* - normal fsyscall handler register usage, except
* that we also have:
* - r18: address of syscall entry point
* - r21: ar.fpsr
* - r26: ar.pfs
* - r27: ar.rsc
* - r29: psr
*
* We used to clear some PSR bits here but that requires slow
* serialization. Fortunately, that isn't really necessary.
* The rationale is as follows: we used to clear bits
* ~PSR_PRESERVED_BITS in PSR.L. Since
* PSR_PRESERVED_BITS==PSR.{UP,MFL,MFH,PK,DT,PP,SP,RT,IC}, we
* ended up clearing PSR.{BE,AC,I,DFL,DFH,DI,DB,SI,TB}.
* However,
*
* PSR.BE : already is turned off in __kernel_syscall_via_epc()
* PSR.AC : don't care (kernel normally turns PSR.AC on)
* PSR.I : already turned off by the time fsys_bubble_down gets
* invoked
* PSR.DFL: always 0 (kernel never turns it on)
* PSR.DFH: don't care --- kernel never touches f32-f127 on its own
* initiative
* PSR.DI : always 0 (kernel never turns it on)
* PSR.SI : always 0 (kernel never turns it on)
* PSR.DB : don't care --- kernel never enables kernel-level
* breakpoints
* PSR.TB : must be 0 already; if it wasn't zero on entry to
* __kernel_syscall_via_epc, the branch to fsys_bubble_down
* will trigger a taken branch; the taken-trap-handler then
* converts the syscall into a break-based system-call.
*/
# define PSR_PRESERVED_BITS (IA64_PSR_UP | IA64_PSR_MFL | IA64_PSR_MFH | IA64_PSR_PK \
| IA64_PSR_DT | IA64_PSR_PP | IA64_PSR_SP | IA64_PSR_RT \
| IA64_PSR_IC)
/*
* Reading psr.l gives us only bits 0-31, psr.it, and psr.mc. The rest we have
* to synthesize.
* Reading psr.l gives us only bits 0-31, psr.it, and psr.mc.
* The rest we have to synthesize.
*/
# define PSR_ONE_BITS ((3 << IA64_PSR_CPL0_BIT) | (0x1 << IA64_PSR_RI_BIT) \
# define PSR_ONE_BITS ((3 << IA64_PSR_CPL0_BIT) \
| (0x1 << IA64_PSR_RI_BIT) \
| IA64_PSR_BN | IA64_PSR_I)
invala
movl r8=PSR_ONE_BITS
invala // M0|1
movl r14=ia64_ret_from_syscall // X
mov r25=ar.unat // save ar.unat (5 cyc)
movl r9=PSR_PRESERVED_BITS
nop.m 0
movl r28=__kernel_syscall_via_break // X create cr.iip
;;
mov ar.rsc=0 // set enforced lazy mode, pl 0, little-endian, loadrs=0
movl r28=__kernel_syscall_via_break
mov r2=r16 // A get task addr to addl-addressable register
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // A
mov r31=pr // I0 save pr (2 cyc)
;;
mov r23=ar.bspstore // save ar.bspstore (12 cyc)
mov r31=pr // save pr (2 cyc)
mov r20=r1 // save caller's gp in r20
st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
addl r22=IA64_RBS_OFFSET,r2 // A compute base of RBS
add r3=TI_FLAGS+IA64_TASK_SIZE,r2 // A
;;
mov r2=r16 // copy current task addr to addl-addressable register
and r9=r9,r29
mov r19=b6 // save b6 (2 cyc)
ld4 r3=[r3] // M0|1 r3 = current_thread_info()->flags
lfetch.fault.excl.nt1 [r22] // M0|1 prefetch register backing-store
nop.i 0
;;
mov psr.l=r9 // slam the door (17 cyc to srlz.i)
or r29=r8,r29 // construct cr.ipsr value to save
addl r22=IA64_RBS_OFFSET,r2 // compute base of RBS
mov ar.rsc=0 // M2 set enforced lazy mode, pl 0, LE, loadrs=0
nop.m 0
nop.i 0
;;
// GAS reports a spurious RAW hazard on the read of ar.rnat because it thinks
// we may be reading ar.itc after writing to psr.l. Avoid that message with
// this directive:
dv_serialize_data
mov.m r24=ar.rnat // read ar.rnat (5 cyc lat)
lfetch.fault.excl.nt1 [r22]
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r2
// ensure previous insn group is issued before we stall for srlz.i:
mov r23=ar.bspstore // M2 (12 cyc) save ar.bspstore
mov.m r24=ar.rnat // M2 (5 cyc) read ar.rnat (dual-issues!)
nop.i 0
;;
srlz.i // ensure new psr.l has been established
/////////////////////////////////////////////////////////////////////////////
////////// from this point on, execution is not interruptible anymore
/////////////////////////////////////////////////////////////////////////////
addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // compute base of memory stack
cmp.ne pKStk,pUStk=r0,r0 // set pKStk <- 0, pUStk <- 1
mov ar.bspstore=r22 // M2 (6 cyc) switch to kernel RBS
movl r8=PSR_ONE_BITS // X
;;
st1 [r16]=r0 // clear current->thread.on_ustack flag
mov ar.bspstore=r22 // switch to kernel RBS
mov b6=r18 // copy syscall entry-point to b6 (7 cyc)
add r3=TI_FLAGS+IA64_TASK_SIZE,r2
mov r25=ar.unat // M2 (5 cyc) save ar.unat
mov r19=b6 // I0 save b6 (2 cyc)
mov r20=r1 // A save caller's gp in r20
;;
ld4 r3=[r3] // r2 = current_thread_info()->flags
mov r18=ar.bsp // save (kernel) ar.bsp (12 cyc)
mov ar.rsc=0x3 // set eager mode, pl 0, little-endian, loadrs=0
br.call.sptk.many b7=ia64_syscall_setup
;;
ssm psr.i
movl r2=ia64_ret_from_syscall
or r29=r8,r29 // A construct cr.ipsr value to save
mov b6=r18 // I0 copy syscall entry-point to b6 (7 cyc)
addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r2 // A compute base of memory stack
mov r18=ar.bsp // M2 save (kernel) ar.bsp (12 cyc)
cmp.ne pKStk,pUStk=r0,r0 // A set pKStk <- 0, pUStk <- 1
br.call.sptk.many b7=ia64_syscall_setup // B
;;
mov rp=r2 // set the real return addr
and r3=_TIF_SYSCALL_TRACEAUDIT,r3
mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
mov rp=r14 // I0 set the real return addr
and r3=_TIF_SYSCALL_TRACEAUDIT,r3 // A
;;
cmp.eq p8,p0=r3,r0
ssm psr.i // M2 we're on kernel stacks now, reenable irqs
cmp.eq p8,p0=r3,r0 // A
(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
(p10) br.cond.spnt.many ia64_ret_from_syscall // p10==true means out registers are more than 8
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall
nop.m 0
(p8) br.call.sptk.many b6=b6 // B (ignore return address)
br.cond.spnt ia64_trace_syscall // B
END(fsys_bubble_down)
.rodata
......
......@@ -72,38 +72,40 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
* bundle get executed. The remaining code must be safe even if
* they do not get executed.
*/
adds r17=-1024,r15
mov r10=0 // default to successful syscall execution
epc
adds r17=-1024,r15 // A
mov r10=0 // A default to successful syscall execution
epc // B causes split-issue
}
;;
rsm psr.be // note: on McKinley "rsm psr.be/srlz.d" is slightly faster than "rum psr.be"
LOAD_FSYSCALL_TABLE(r14)
mov r16=IA64_KR(CURRENT) // 12 cycle read latency
tnat.nz p10,p9=r15
mov r19=NR_syscalls-1
rsm psr.be | psr.i // M2 (5 cyc to srlz.d)
LOAD_FSYSCALL_TABLE(r14) // X
;;
shladd r18=r17,3,r14
srlz.d
cmp.ne p8,p0=r0,r0 // p8 <- FALSE
/* Note: if r17 is a NaT, p6 will be set to zero. */
cmp.geu p6,p7=r19,r17 // (syscall > 0 && syscall < 1024+NR_syscalls)?
;;
(p6) ld8 r18=[r18]
mov r21=ar.fpsr
add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
;;
(p6) mov b7=r18
(p6) tbit.z p8,p0=r18,0
(p8) br.dptk.many b7
(p6) rsm psr.i
mov r27=ar.rsc
mov r26=ar.pfs
mov r16=IA64_KR(CURRENT) // M2 (12 cyc)
shladd r18=r17,3,r14 // A
mov r19=NR_syscalls-1 // A
;;
lfetch [r18] // M0|1
mov r29=psr // M2 (12 cyc)
// If r17 is a NaT, p6 will be zero
cmp.geu p6,p7=r19,r17 // A (sysnr > 0 && sysnr < 1024+NR_syscalls)?
;;
mov r21=ar.fpsr // M2 (12 cyc)
tnat.nz p10,p9=r15 // I0
mov.i r26=ar.pfs // I0 (would stall anyhow due to srlz.d...)
;;
srlz.d // M0 (forces split-issue) ensure PSR.BE==0
(p6) ld8 r18=[r18] // M0|1
nop.i 0
;;
nop.m 0
(p6) tbit.z.unc p8,p0=r18,0 // I0 (dual-issues with "mov b7=r18"!)
nop.i 0
;;
mov r29=psr // read psr (12 cyc load latency)
(p8) ssm psr.i
(p6) mov b7=r18 // I0
(p8) br.dptk.many b7 // B
mov r27=ar.rsc // M2 (12 cyc)
/*
* brl.cond doesn't work as intended because the linker would convert this branch
* into a branch to a PLT. Perhaps there will be a way to avoid this with some
......@@ -111,6 +113,8 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
* instead.
*/
#ifdef CONFIG_ITANIUM
(p6) add r14=-8,r14 // r14 <- addr of fsys_bubble_down entry
;;
(p6) ld8 r14=[r14] // r14 <- fsys_bubble_down
;;
(p6) mov b7=r14
......@@ -118,7 +122,7 @@ GLOBAL_ENTRY(__kernel_syscall_via_epc)
#else
BRL_COND_FSYS_BUBBLE_DOWN(p6)
#endif
ssm psr.i
mov r10=-1
(p10) mov r8=EINVAL
(p9) mov r8=ENOSYS
......
......@@ -58,9 +58,6 @@ EXPORT_SYMBOL(__strlen_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);
#include <asm/unistd.h>
EXPORT_SYMBOL(__ia64_syscall);
/* from arch/ia64/lib */
extern void __divsi3(void);
extern void __udivsi3(void);
......
/*
* arch/ia64/kernel/ivt.S
*
* Copyright (C) 1998-2001, 2003 Hewlett-Packard Co
* Copyright (C) 1998-2001, 2003, 2005 Hewlett-Packard Co
* Stephane Eranian <eranian@hpl.hp.com>
* David Mosberger <davidm@hpl.hp.com>
* Copyright (C) 2000, 2002-2003 Intel Co
......@@ -692,82 +692,118 @@ ENTRY(break_fault)
* to prevent leaking bits from kernel to user level.
*/
DBG_FAULT(11)
mov r16=IA64_KR(CURRENT) // r16 = current task; 12 cycle read lat.
mov r17=cr.iim
mov r18=__IA64_BREAK_SYSCALL
mov r21=ar.fpsr
mov r29=cr.ipsr
mov r19=b6
mov r25=ar.unat
mov r27=ar.rsc
mov r26=ar.pfs
mov r28=cr.iip
mov r31=pr // prepare to save predicates
mov r20=r1
;;
mov.m r16=IA64_KR(CURRENT) // M2 r16 <- current task (12 cyc)
mov r29=cr.ipsr // M2 (12 cyc)
mov r31=pr // I0 (2 cyc)
mov r17=cr.iim // M2 (2 cyc)
mov.m r27=ar.rsc // M2 (12 cyc)
mov r18=__IA64_BREAK_SYSCALL // A
mov.m ar.rsc=0 // M2
mov.m r21=ar.fpsr // M2 (12 cyc)
mov r19=b6 // I0 (2 cyc)
;;
mov.m r23=ar.bspstore // M2 (12 cyc)
mov.m r24=ar.rnat // M2 (5 cyc)
mov.i r26=ar.pfs // I0 (2 cyc)
invala // M0|1
nop.m 0 // M
mov r20=r1 // A save r1
nop.m 0
movl r30=sys_call_table // X
mov r28=cr.iip // M2 (2 cyc)
cmp.eq p0,p7=r18,r17 // I0 is this a system call?
(p7) br.cond.spnt non_syscall // B no ->
//
// From this point on, we are definitely on the syscall-path
// and we can use (non-banked) scratch registers.
//
///////////////////////////////////////////////////////////////////////
mov r1=r16 // A move task-pointer to "addl"-addressable reg
mov r2=r16 // A setup r2 for ia64_syscall_setup
add r9=TI_FLAGS+IA64_TASK_SIZE,r16 // A r9 = &current_thread_info()->flags
adds r16=IA64_TASK_THREAD_ON_USTACK_OFFSET,r16
cmp.eq p0,p7=r18,r17 // is this a system call? (p7 <- false, if so)
(p7) br.cond.spnt non_syscall
adds r15=-1024,r15 // A subtract 1024 from syscall number
mov r3=NR_syscalls - 1
;;
ld1 r17=[r16] // load current->thread.on_ustack flag
st1 [r16]=r0 // clear current->thread.on_ustack flag
add r1=-IA64_TASK_THREAD_ON_USTACK_OFFSET,r16 // set r1 for MINSTATE_START_SAVE_MIN_VIRT
ld1.bias r17=[r16] // M0|1 r17 = current->thread.on_ustack flag
ld4 r9=[r9] // M0|1 r9 = current_thread_info()->flags
extr.u r8=r29,41,2 // I0 extract ei field from cr.ipsr
shladd r30=r15,3,r30 // A r30 = sys_call_table + 8*(syscall-1024)
addl r22=IA64_RBS_OFFSET,r1 // A compute base of RBS
cmp.leu p6,p7=r15,r3 // A syscall number in range?
;;
invala
/* adjust return address so we skip over the break instruction: */
lfetch.fault.excl.nt1 [r22] // M0|1 prefetch RBS
(p6) ld8 r30=[r30] // M0|1 load address of syscall entry point
tnat.nz.or p7,p0=r15 // I0 is syscall nr a NaT?
extr.u r8=r29,41,2 // extract ei field from cr.ipsr
;;
cmp.eq p6,p7=2,r8 // isr.ei==2?
mov r2=r1 // setup r2 for ia64_syscall_setup
;;
(p6) mov r8=0 // clear ei to 0
(p6) adds r28=16,r28 // switch cr.iip to next bundle cr.ipsr.ei wrapped
(p7) adds r8=1,r8 // increment ei to next slot
;;
cmp.eq pKStk,pUStk=r0,r17 // are we in kernel mode already?
dep r29=r8,r29,41,2 // insert new ei into cr.ipsr
mov.m ar.bspstore=r22 // M2 switch to kernel RBS
cmp.eq p8,p9=2,r8 // A isr.ei==2?
;;
// switch from user to kernel RBS:
MINSTATE_START_SAVE_MIN_VIRT
br.call.sptk.many b7=ia64_syscall_setup
;;
MINSTATE_END_SAVE_MIN_VIRT // switch to bank 1
ssm psr.ic | PSR_DEFAULT_BITS
;;
srlz.i // guarantee that interruption collection is on
mov r3=NR_syscalls - 1
;;
(p15) ssm psr.i // restore psr.i
// p10==true means out registers are more than 8 or r15's Nat is true
(p10) br.cond.spnt.many ia64_ret_from_syscall
;;
movl r16=sys_call_table
(p8) mov r8=0 // A clear ei to 0
(p7) movl r30=sys_ni_syscall // X
adds r15=-1024,r15 // r15 contains the syscall number---subtract 1024
movl r2=ia64_ret_from_syscall
;;
shladd r20=r15,3,r16 // r20 = sys_call_table + 8*(syscall-1024)
cmp.leu p6,p7=r15,r3 // (syscall > 0 && syscall < 1024 + NR_syscalls) ?
mov rp=r2 // set the real return addr
(p8) adds r28=16,r28 // A switch cr.iip to next bundle
(p9) adds r8=1,r8 // A increment ei to next slot
nop.i 0
;;
(p6) ld8 r20=[r20] // load address of syscall entry point
(p7) movl r20=sys_ni_syscall
add r2=TI_FLAGS+IA64_TASK_SIZE,r13
;;
ld4 r2=[r2] // r2 = current_thread_info()->flags
;;
and r2=_TIF_SYSCALL_TRACEAUDIT,r2 // mask trace or audit
mov.m r25=ar.unat // M2 (5 cyc)
dep r29=r8,r29,41,2 // I0 insert new ei into cr.ipsr
adds r15=1024,r15 // A restore original syscall number
//
// If any of the above loads miss in L1D, we'll stall here until
// the data arrives.
//
///////////////////////////////////////////////////////////////////////
st1 [r16]=r0 // M2|3 clear current->thread.on_ustack flag
mov b6=r30 // I0 setup syscall handler branch reg early
cmp.eq pKStk,pUStk=r0,r17 // A were we on kernel stacks already?
and r9=_TIF_SYSCALL_TRACEAUDIT,r9 // A mask trace or audit
mov r18=ar.bsp // M2 (12 cyc)
(pKStk) br.cond.spnt .break_fixup // B we're already in kernel-mode -- fix up RBS
;;
.back_from_break_fixup:
(pUStk) addl r1=IA64_STK_OFFSET-IA64_PT_REGS_SIZE,r1 // A compute base of memory stack
cmp.eq p14,p0=r9,r0 // A are syscalls being traced/audited?
br.call.sptk.many b7=ia64_syscall_setup // B
1:
mov ar.rsc=0x3 // M2 set eager mode, pl 0, LE, loadrs=0
nop 0
bsw.1 // B (6 cyc) regs are saved, switch to bank 1
;;
cmp.eq p8,p0=r2,r0
mov b6=r20
ssm psr.ic | PSR_DEFAULT_BITS // M2 now it's safe to re-enable intr.-collection
movl r3=ia64_ret_from_syscall // X
;;
(p8) br.call.sptk.many b6=b6 // ignore this return addr
br.cond.sptk ia64_trace_syscall
srlz.i // M0 ensure interruption collection is on
mov rp=r3 // I0 set the real return addr
(p10) br.cond.spnt.many ia64_ret_from_syscall // B return if bad call-frame or r15 is a NaT
(p15) ssm psr.i // M2 restore psr.i
(p14) br.call.sptk.many b6=b6 // B invoke syscall-handler (ignore return addr)
br.cond.spnt.many ia64_trace_syscall // B do syscall-tracing thingamagic
// NOT REACHED
///////////////////////////////////////////////////////////////////////
// On entry, we optimistically assumed that we're coming from user-space.
// For the rare cases where a system-call is done from within the kernel,
// we fix things up at this point:
.break_fixup:
add r1=-IA64_PT_REGS_SIZE,sp // A allocate space for pt_regs structure
mov ar.rnat=r24 // M2 restore kernel's AR.RNAT
;;
mov ar.bspstore=r23 // M2 restore kernel's AR.BSPSTORE
br.cond.sptk .back_from_break_fixup
END(break_fault)
.org ia64_ivt+0x3000
......@@ -842,8 +878,6 @@ END(interrupt)
* - r31: saved pr
* - b0: original contents (to be saved)
* On exit:
* - executing on bank 1 registers
* - psr.ic enabled, interrupts restored
* - p10: TRUE if syscall is invoked with more than 8 out
* registers or r15's Nat is true
* - r1: kernel's gp
......@@ -851,8 +885,11 @@ END(interrupt)
* - r8: -EINVAL if p10 is true
* - r12: points to kernel stack
* - r13: points to current task
* - r14: preserved (same as on entry)
* - p13: preserved
* - p15: TRUE if interrupts need to be re-enabled
* - ar.fpsr: set to kernel settings
* - b6: preserved (same as on entry)
*/
GLOBAL_ENTRY(ia64_syscall_setup)
#if PT(B6) != 0
......@@ -920,10 +957,10 @@ GLOBAL_ENTRY(ia64_syscall_setup)
(p13) mov in5=-1
;;
st8 [r16]=r21,PT(R8)-PT(AR_FPSR) // save ar.fpsr
tnat.nz p14,p0=in6
tnat.nz p13,p0=in6
cmp.lt p10,p9=r11,r8 // frame size can't be more than local+8
;;
stf8 [r16]=f1 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
mov r8=1
(p9) tnat.nz p10,p0=r15
adds r12=-16,r1 // switch to kernel memory stack (with 16 bytes of scratch)
......@@ -934,9 +971,9 @@ GLOBAL_ENTRY(ia64_syscall_setup)
mov r13=r2 // establish `current'
movl r1=__gp // establish kernel global pointer
;;
(p14) mov in6=-1
st8 [r16]=r8 // ensure pt_regs.r8 != 0 (see handle_syscall_error)
(p13) mov in6=-1
(p8) mov in7=-1
nop.i 0
cmp.eq pSys,pNonSys=r0,r0 // set pSys=1, pNonSys=0
movl r17=FPSR_DEFAULT
......@@ -1007,6 +1044,8 @@ END(dispatch_illegal_op_fault)
FAULT(17)
ENTRY(non_syscall)
mov ar.rsc=r27 // restore ar.rsc before SAVE_MIN_WITH_COVER
;;
SAVE_MIN_WITH_COVER
// There is no particular reason for this code to be here, other than that
......@@ -1204,6 +1243,25 @@ END(disabled_fp_reg)
// 0x5600 Entry 26 (size 16 bundles) Nat Consumption (11,23,37,50)
ENTRY(nat_consumption)
DBG_FAULT(26)
mov r16=cr.ipsr
mov r17=cr.isr
mov r31=pr // save PR
;;
and r18=0xf,r17 // r18 = cr.ipsr.code{3:0}
tbit.z p6,p0=r17,IA64_ISR_NA_BIT
;;
cmp.ne.or p6,p0=IA64_ISR_CODE_LFETCH,r18
dep r16=-1,r16,IA64_PSR_ED_BIT,1
(p6) br.cond.spnt 1f // branch if (cr.ipsr.na == 0 || cr.ipsr.code{3:0} != LFETCH)
;;
mov cr.ipsr=r16 // set cr.ipsr.na
mov pr=r31,-1
;;
rfi
1: mov pr=r31,-1
;;
FAULT(26)
END(nat_consumption)
......
......@@ -725,12 +725,32 @@ convert_to_non_syscall (struct task_struct *child, struct pt_regs *pt,
break;
}
/*
* Note: at the time of this call, the target task is blocked
* in notify_resume_user() and by clearing PRED_LEAVE_SYSCALL
* (aka, "pLvSys") we redirect execution from
* .work_pending_syscall_end to .work_processed_kernel.
*/
unw_get_pr(&prev_info, &pr);
pr &= ~(1UL << PRED_SYSCALL);
pr &= ~((1UL << PRED_SYSCALL) | (1UL << PRED_LEAVE_SYSCALL));
pr |= (1UL << PRED_NON_SYSCALL);
unw_set_pr(&prev_info, pr);
pt->cr_ifs = (1UL << 63) | cfm;
/*
* Clear the memory that is NOT written on syscall-entry to
* ensure we do not leak kernel-state to user when execution
* resumes.
*/
pt->r2 = 0;
pt->r3 = 0;
pt->r14 = 0;
memset(&pt->r16, 0, 16*8); /* clear r16-r31 */
memset(&pt->f6, 0, 6*16); /* clear f6-f11 */
pt->b7 = 0;
pt->ar_ccv = 0;
pt->ar_csd = 0;
pt->ar_ssd = 0;
}
static int
......
......@@ -72,6 +72,8 @@ DEFINE_PER_CPU(unsigned long, ia64_phys_stacked_size_p8);
unsigned long ia64_cycles_per_usec;
struct ia64_boot_param *ia64_boot_param;
struct screen_info screen_info;
unsigned long vga_console_iobase;
unsigned long vga_console_membase;
unsigned long ia64_max_cacheline_size;
unsigned long ia64_iobase; /* virtual address for I/O accesses */
......@@ -273,23 +275,25 @@ io_port_init (void)
static inline int __init
early_console_setup (char *cmdline)
{
int earlycons = 0;
#ifdef CONFIG_SERIAL_SGI_L1_CONSOLE
{
extern int sn_serial_console_early_setup(void);
if (!sn_serial_console_early_setup())
return 0;
earlycons++;
}
#endif
#ifdef CONFIG_EFI_PCDP
if (!efi_setup_pcdp_console(cmdline))
return 0;
earlycons++;
#endif
#ifdef CONFIG_SERIAL_8250_CONSOLE
if (!early_serial_console_init(cmdline))
return 0;
earlycons++;
#endif
return -1;
return (earlycons) ? 0 : -1;
}
static inline void
......
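The reworked early_console_setup() above no longer returns as soon as the first backend succeeds: it now tries every configured backend, counts how many initialized, and reports failure only if none did. A minimal C sketch of that control-flow shape, with hypothetical setup_backend_a()/setup_backend_b() standing in for the real per-driver setup calls (each returning 0 on success):

	/* Sketch only: setup_backend_a/b are hypothetical stand-ins for the
	 * per-driver early-console setup calls, each returning 0 on success. */
	static int early_consoles_sketch(char *cmdline)
	{
		int earlycons = 0;

		if (!setup_backend_a(cmdline))
			earlycons++;
		if (!setup_backend_b(cmdline))
			earlycons++;

		return earlycons ? 0 : -1;	/* succeed if at least one backend came up */
	}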
......@@ -231,13 +231,16 @@ smp_flush_tlb_all (void)
void
smp_flush_tlb_mm (struct mm_struct *mm)
{
preempt_disable();
/* this happens for the common case of a single-threaded fork(): */
if (likely(mm == current->active_mm && atomic_read(&mm->mm_users) == 1))
{
local_finish_flush_tlb_mm(mm);
preempt_enable();
return;
}
preempt_enable();
/*
* We could optimize this further by using mm->cpu_vm_mask to track which CPUs
* have been running in the address space. It's not clear that this is worth the
......
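The point of the preempt_disable()/preempt_enable() pair added above is that the fast path reads current->active_mm and then flushes only the local CPU; if the task could be preempted and migrated between the check and the flush, the wrong CPU's TLB would be flushed. A hedged sketch of the pattern (using the same kernel symbols as the hunk, not a drop-in replacement):

	void smp_flush_tlb_mm_sketch(struct mm_struct *mm)
	{
		preempt_disable();		/* pin ourselves to this CPU */
		if (mm == current->active_mm && atomic_read(&mm->mm_users) == 1) {
			local_finish_flush_tlb_mm(mm);	/* purely CPU-local work */
			preempt_enable();
			return;
		}
		preempt_enable();
		/* slow path: ask the other CPUs to flush as well */
	}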
......@@ -384,7 +384,7 @@ static int __init sn_pci_init(void)
extern void register_sn_procfs(void);
#endif
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_SIMULATOR())
if (!ia64_platform_is("sn2") || IS_RUNNING_ON_FAKE_PROM())
return 0;
/*
......
......@@ -9,12 +9,16 @@
#include <linux/module.h>
#include <asm/io.h>
#include <asm/delay.h>
#include <asm/vga.h>
#include <asm/sn/nodepda.h>
#include <asm/sn/simulator.h>
#include <asm/sn/pda.h>
#include <asm/sn/sn_cpuid.h>
#include <asm/sn/shub_mmr.h>
#define IS_LEGACY_VGA_IOPORT(p) \
(((p) >= 0x3b0 && (p) <= 0x3bb) || ((p) >= 0x3c0 && (p) <= 0x3df))
/**
* sn_io_addr - convert an in/out port to an i/o address
* @port: port to convert
......@@ -26,6 +30,8 @@
void *sn_io_addr(unsigned long port)
{
if (!IS_RUNNING_ON_SIMULATOR()) {
if (IS_LEGACY_VGA_IOPORT(port))
port += vga_console_iobase;
/* On sn2, legacy I/O ports don't point at anything */
if (port < (64 * 1024))
return NULL;
......
......@@ -36,6 +36,7 @@
#include <asm/machvec.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/vga.h>
#include <asm/sn/arch.h>
#include <asm/sn/addrs.h>
#include <asm/sn/pda.h>
......@@ -95,6 +96,7 @@ u8 sn_coherency_id;
EXPORT_SYMBOL(sn_coherency_id);
u8 sn_region_size;
EXPORT_SYMBOL(sn_region_size);
int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
short physical_node_map[MAX_PHYSNODE_ID];
......@@ -273,14 +275,17 @@ void __init sn_setup(char **cmdline_p)
ia64_sn_plat_set_error_handling_features();
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
/*
* If the generic code has enabled vga console support - lets
* get rid of it again. This is a kludge for the fact that ACPI
* currently has no way of informing us if legacy VGA is available
* or not.
* If there was a primary vga adapter identified through the
* EFI PCDP table, make it the preferred console. Otherwise
* zero out conswitchp.
*/
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
if (conswitchp == &vga_con) {
if (vga_console_membase) {
/* usable vga ... make tty0 the preferred default console */
add_preferred_console("tty", 0, NULL);
} else {
printk(KERN_DEBUG "SGI: Disabling VGA console\n");
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
......@@ -350,7 +355,7 @@ void __init sn_setup(char **cmdline_p)
ia64_mark_idle = &snidle;
/*
/*
* For the bootcpu, we do this here. All other cpus will make the
* call as part of cpu_init in slave cpu initialization.
*/
......@@ -397,7 +402,7 @@ static void __init sn_init_pdas(char **cmdline_p)
nodepdaindr[cnode] =
alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
memset(nodepdaindr[cnode]->phys_cpuid, -1,
memset(nodepdaindr[cnode]->phys_cpuid, -1,
sizeof(nodepdaindr[cnode]->phys_cpuid));
}
......@@ -427,7 +432,7 @@ static void __init sn_init_pdas(char **cmdline_p)
}
/*
* Initialize the per node hubdev. This includes IO Nodes and
* Initialize the per node hubdev. This includes IO Nodes and
* headless/memless nodes.
*/
for (cnode = 0; cnode < numionodes; cnode++) {
......@@ -455,6 +460,14 @@ void __init sn_cpu_init(void)
int i;
static int wars_have_been_checked;
if (smp_processor_id() == 0 && IS_MEDUSA()) {
if (ia64_sn_is_fake_prom())
sn_prom_type = 2;
else
sn_prom_type = 1;
printk("Running on medusa with %s PROM\n", (sn_prom_type == 1) ? "real" : "fake");
}
memset(pda, 0, sizeof(pda));
if (ia64_sn_get_sn_info(0, &sn_hub_info->shub2, &sn_hub_info->nasid_bitmask, &sn_hub_info->nasid_shift,
&sn_system_size, &sn_sharing_domain_size, &sn_partition_id,
......@@ -520,7 +533,7 @@ void __init sn_cpu_init(void)
*/
{
u64 pio1[] = {SH1_PIO_WRITE_STATUS_0, 0, SH1_PIO_WRITE_STATUS_1, 0};
u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1,
u64 pio2[] = {SH2_PIO_WRITE_STATUS_0, SH2_PIO_WRITE_STATUS_1,
SH2_PIO_WRITE_STATUS_2, SH2_PIO_WRITE_STATUS_3};
u64 *pio;
pio = is_shub1() ? pio1 : pio2;
......@@ -552,6 +565,10 @@ static void __init scan_for_ionodes(void)
int nasid = 0;
lboard_t *brd;
/* fakeprom does not support klgraph */
if (IS_RUNNING_ON_FAKE_PROM())
return;
/* Setup ionodes with memory */
for (nasid = 0; nasid < MAX_PHYSNODE_ID; nasid += 2) {
char *klgraph_header;
......@@ -563,8 +580,6 @@ static void __init scan_for_ionodes(void)
cnodeid = -1;
klgraph_header = __va(ia64_sn_get_klconfig_addr(nasid));
if (!klgraph_header) {
if (IS_RUNNING_ON_SIMULATOR())
continue;
BUG(); /* All nodes must have klconfig tables! */
}
cnodeid = nasid_to_cnodeid(nasid);
......@@ -630,8 +645,8 @@ int
nasid_slice_to_cpuid(int nasid, int slice)
{
long cpu;
for (cpu=0; cpu < NR_CPUS; cpu++)
for (cpu=0; cpu < NR_CPUS; cpu++)
if (cpuid_to_nasid(cpu) == nasid &&
cpuid_to_slice(cpu) == slice)
return cpu;
......
......@@ -6,6 +6,7 @@
* Copyright (C) 2000-2004 Silicon Graphics, Inc. All rights reserved.
*/
#include <asm/types.h>
#include <asm/sn/shub_mmr.h>
#define DEADLOCKBIT SH_PIO_WRITE_STATUS_WRITE_DEADLOCK_SHFT
......
......@@ -204,8 +204,8 @@ cx_device_register(nasid_t nasid, int part_num, int mfg_num,
cx_dev->dev.parent = NULL;
cx_dev->dev.bus = &tiocx_bus_type;
cx_dev->dev.release = tiocx_bus_release;
snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d.0x%x",
cx_dev->cx_id.nasid, cx_dev->cx_id.part_num);
snprintf(cx_dev->dev.bus_id, BUS_ID_SIZE, "%d",
cx_dev->cx_id.nasid);
device_register(&cx_dev->dev);
get_device(&cx_dev->dev);
......@@ -236,7 +236,6 @@ int cx_device_unregister(struct cx_dev *cx_dev)
*/
static int cx_device_reload(struct cx_dev *cx_dev)
{
device_remove_file(&cx_dev->dev, &dev_attr_cxdev_control);
cx_device_unregister(cx_dev);
return cx_device_register(cx_dev->cx_id.nasid, cx_dev->cx_id.part_num,
cx_dev->cx_id.mfg_num, cx_dev->hubdev);
......@@ -383,6 +382,7 @@ static int is_fpga_brick(int nasid)
switch (tiocx_btchar_get(nasid)) {
case L1_BRICKTYPE_SA:
case L1_BRICKTYPE_ATHENA:
case L1_BRICKTYPE_DAYTONA:
return 1;
}
return 0;
......@@ -409,7 +409,7 @@ static int tiocx_reload(struct cx_dev *cx_dev)
uint64_t cx_id;
cx_id =
*(volatile int32_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
*(volatile uint64_t *)(TIO_SWIN_BASE(nasid, TIOCX_CORELET) +
WIDGET_ID);
part_num = XWIDGET_PART_NUM(cx_id);
mfg_num = XWIDGET_MFG_NUM(cx_id);
......@@ -458,6 +458,10 @@ static ssize_t store_cxdev_control(struct device *dev, struct device_attribute *
switch (n) {
case 1:
tio_corelet_reset(cx_dev->cx_id.nasid, TIOCX_CORELET);
tiocx_reload(cx_dev);
break;
case 2:
tiocx_reload(cx_dev);
break;
case 3:
......@@ -537,7 +541,7 @@ static void __exit tiocx_exit(void)
bus_unregister(&tiocx_bus_type);
}
module_init(tiocx_init);
subsys_initcall(tiocx_init);
module_exit(tiocx_exit);
/************************************************************************
......
......@@ -336,7 +336,7 @@ tioca_dma_d48(struct pci_dev *pdev, uint64_t paddr)
if (!ct_addr)
return 0;
bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffff);
bus_addr = (dma_addr_t) (ct_addr & 0xffffffffffffUL);
node_upper = ct_addr >> 48;
if (node_upper > 64) {
......@@ -464,7 +464,7 @@ tioca_dma_mapped(struct pci_dev *pdev, uint64_t paddr, size_t req_size)
* For mappings created using the direct modes (64 or 48) there are no
* resources to release.
*/
void
static void
tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
{
int i, entry;
......@@ -514,7 +514,7 @@ tioca_dma_unmap(struct pci_dev *pdev, dma_addr_t bus_addr, int dir)
* The mapping mode used is based on the devices dma_mask. As a last resort
* use the GART mapped mode.
*/
uint64_t
static uint64_t
tioca_dma_map(struct pci_dev *pdev, uint64_t paddr, size_t byte_count)
{
uint64_t mapaddr;
......@@ -580,7 +580,7 @@ tioca_error_intr_handler(int irq, void *arg, struct pt_regs *pt)
* On successful setup, returns the kernel version of tioca_common back to
* the caller.
*/
void *
static void *
tioca_bus_fixup(struct pcibus_bussoft *prom_bussoft)
{
struct tioca_common *tioca_common;
......
......@@ -16,6 +16,7 @@
#include <linux/console.h>
#include <linux/efi.h>
#include <linux/serial.h>
#include <asm/vga.h>
#include "pcdp.h"
static int __init
......@@ -40,10 +41,27 @@ setup_serial_console(struct pcdp_uart *uart)
}
static int __init
setup_vga_console(struct pcdp_vga *vga)
setup_vga_console(struct pcdp_device *dev)
{
#if defined(CONFIG_VT) && defined(CONFIG_VGA_CONSOLE)
if (efi_mem_type(0xA0000) == EFI_CONVENTIONAL_MEMORY) {
u8 *if_ptr;
if_ptr = ((u8 *)dev + sizeof(struct pcdp_device));
if (if_ptr[0] == PCDP_IF_PCI) {
struct pcdp_if_pci if_pci;
/* struct copy since ifptr might not be correctly aligned */
memcpy(&if_pci, if_ptr, sizeof(if_pci));
if (if_pci.trans & PCDP_PCI_TRANS_IOPORT)
vga_console_iobase = if_pci.ioport_tra;
if (if_pci.trans & PCDP_PCI_TRANS_MMIO)
vga_console_membase = if_pci.mmio_tra;
}
if (efi_mem_type(vga_console_membase + 0xA0000) == EFI_CONVENTIONAL_MEMORY) {
printk(KERN_ERR "PCDP: VGA selected, but frame buffer is not MMIO!\n");
return -ENODEV;
}
......@@ -95,7 +113,7 @@ efi_setup_pcdp_console(char *cmdline)
dev = (struct pcdp_device *) ((u8 *) dev + dev->length)) {
if (dev->flags & PCDP_PRIMARY_CONSOLE) {
if (dev->type == PCDP_CONSOLE_VGA) {
return setup_vga_console((struct pcdp_vga *) dev);
return setup_vga_console(dev);
}
}
}
......
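The "struct copy since ifptr might not be correctly aligned" comment above is the key detail of this hunk: the pcdp_if_pci record sits at an arbitrary byte offset inside the firmware table, and dereferencing a misaligned pointer to its 64-bit fields on ia64 would trap. Copying the record into an aligned local with memcpy() sidesteps that. A self-contained sketch of the same pattern, using a hypothetical record type rather than the real PCDP structures:

	#include <string.h>

	struct fw_record {			/* hypothetical packed firmware record */
		unsigned long long mmio_tra;
		unsigned char trans;
	} __attribute__((packed));

	/* Read a field out of a possibly misaligned record inside a raw
	 * firmware blob: copy the whole record to an aligned local first. */
	static unsigned long long read_mmio_tra(const unsigned char *raw)
	{
		struct fw_record rec;

		memcpy(&rec, raw, sizeof(rec));
		return rec.mmio_tra;
	}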
......@@ -52,11 +52,34 @@ struct pcdp_uart {
u32 clock_rate;
u8 pci_prog_intfc;
u8 flags;
};
} __attribute__((packed));
#define PCDP_IF_PCI 1
/* pcdp_if_pci.trans */
#define PCDP_PCI_TRANS_IOPORT 0x02
#define PCDP_PCI_TRANS_MMIO 0x01
struct pcdp_if_pci {
u8 interconnect;
u8 reserved;
u16 length;
u8 segment;
u8 bus;
u8 dev;
u8 fun;
u16 dev_id;
u16 vendor_id;
u32 acpi_interrupt;
u64 mmio_tra;
u64 ioport_tra;
u8 flags;
u8 trans;
} __attribute__((packed));
struct pcdp_vga {
u8 count; /* address space descriptors */
};
} __attribute__((packed));
/* pcdp_device.flags */
#define PCDP_PRIMARY_CONSOLE 1
......@@ -66,7 +89,9 @@ struct pcdp_device {
u8 flags;
u16 length;
u16 efi_index;
};
/* next data is pcdp_if_pci or pcdp_if_acpi (not yet supported) */
/* next data is device specific type (currently only pcdp_vga) */
} __attribute__((packed));
struct pcdp {
u8 signature[4];
......@@ -81,4 +106,4 @@ struct pcdp {
u32 num_uarts;
struct pcdp_uart uart[0]; /* actual size is num_uarts */
/* remainder of table is pcdp_device structures */
};
} __attribute__((packed));
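Marking every struct that mirrors a PCDP record as __attribute__((packed)), as the hunk above does, keeps the C layout byte-for-byte congruent with the firmware table, so walking the table by dev->length and casting raw bytes to these structs lands each field at the right offset. A tiny standalone illustration of the effect (hypothetical structs, not the real headers):

	#include <stdio.h>

	struct with_padding { unsigned char a; unsigned int b; };
	struct no_padding   { unsigned char a; unsigned int b; } __attribute__((packed));

	int main(void)
	{
		/* Typically prints "8 5": packing removes the compiler-inserted
		 * padding after 'a', matching a byte-packed firmware layout. */
		printf("%zu %zu\n", sizeof(struct with_padding), sizeof(struct no_padding));
		return 0;
	}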
......@@ -132,6 +132,9 @@ reload_context (mm_context_t context)
ia64_srlz_i(); /* srlz.i implies srlz.d */
}
/*
* Must be called with preemption off
*/
static inline void
activate_context (struct mm_struct *mm)
{
......
......@@ -216,6 +216,10 @@
#define TIO_SWIN_WIDGETNUM(x) (((x) >> TIO_SWIN_SIZE_BITS) & TIO_SWIN_WIDGET_MASK)
#define TIO_IOSPACE_ADDR(n,x) \
/* Move in the Chiplet ID for TIO Local Block MMR */ \
(REMOTE_ADDR(n,x) | 1UL << (NASID_SHIFT - 2))
/*
* The following macros produce the correct base virtual address for
* the hub registers. The REMOTE_HUB_* macro produce
......@@ -233,13 +237,16 @@
#define REMOTE_HUB_ADDR(n,x) \
((n & 1) ? \
/* TIO: */ \
((volatile u64 *)(GLOBAL_MMR_ADDR(n,x))) \
: /* SHUB: */ \
(((x) & BWIN_TOP) ? ((volatile u64 *)(GLOBAL_MMR_ADDR(n,x)))\
(is_shub2() ? \
/* TIO on Shub2 */ \
(volatile u64 *)(TIO_IOSPACE_ADDR(n,x)) \
: /* TIO on shub1 */ \
(volatile u64 *)(GLOBAL_MMR_ADDR(n,x))) \
\
: /* SHUB1 and SHUB2 MMRs: */ \
(((x) & BWIN_TOP) ? ((volatile u64 *)(GLOBAL_MMR_ADDR(n,x))) \
: ((volatile u64 *)(NODE_SWIN_BASE(n,1) + 0x800000 + (x)))))
#define HUB_L(x) (*((volatile typeof(*x) *)x))
#define HUB_S(x,d) (*((volatile typeof(*x) *)x) = (d))
......
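The rewritten REMOTE_HUB_ADDR above now distinguishes three cases: TIO registers on SHUB2 (routed through TIO_IOSPACE_ADDR, which folds the chiplet ID into the address), TIO registers on SHUB1 (still GLOBAL_MMR_ADDR), and SHUB MMRs (the unchanged big-window vs. small-window split). The nested ternaries read more easily written out as an equivalent helper; this sketch assumes the helper macros behave exactly as defined in the surrounding header:

	static inline volatile u64 *remote_hub_addr_sketch(int n, unsigned long x)
	{
		if (n & 1) {					/* odd nasid: TIO */
			if (is_shub2())
				return (volatile u64 *)TIO_IOSPACE_ADDR(n, x);
			return (volatile u64 *)GLOBAL_MMR_ADDR(n, x);	/* TIO on shub1 */
		}
		/* SHUB1 and SHUB2 MMRs */
		if (x & BWIN_TOP)
			return (volatile u64 *)GLOBAL_MMR_ADDR(n, x);
		return (volatile u64 *)(NODE_SWIN_BASE(n, 1) + 0x800000 + x);
	}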
......@@ -33,5 +33,6 @@
#define L1_BRICKTYPE_PA 0x6a /* j */
#define L1_BRICKTYPE_IA 0x6b /* k */
#define L1_BRICKTYPE_ATHENA 0x2b /* + */
#define L1_BRICKTYPE_DAYTONA 0x7a /* z */
#endif /* _ASM_IA64_SN_L1_H */
......@@ -10,16 +10,17 @@
#include <linux/config.h>
#ifdef CONFIG_IA64_SGI_SN_SIM
#define SNMAGIC 0xaeeeeeee8badbeefL
#define IS_RUNNING_ON_SIMULATOR() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
#define SIMULATOR_SLEEP() asm("nop.i 0x8beef")
#define IS_MEDUSA() ({long sn; asm("mov %0=cpuid[%1]" : "=r"(sn) : "r"(2)); sn == SNMAGIC;})
#ifdef CONFIG_IA64_SGI_SN_SIM
#define SIMULATOR_SLEEP() asm("nop.i 0x8beef")
#define IS_RUNNING_ON_SIMULATOR() (sn_prom_type)
#define IS_RUNNING_ON_FAKE_PROM() (sn_prom_type == 2)
extern int sn_prom_type; /* 0=hardware, 1=medusa/realprom, 2=medusa/fakeprom */
#else
#define IS_RUNNING_ON_SIMULATOR() (0)
#define IS_RUNNING_ON_FAKE_PROM() (0)
#define SIMULATOR_SLEEP()
#endif
......
......@@ -223,4 +223,6 @@ struct sn_hwperf_ioctl_args {
#define SN_HWPERF_OP_RECONFIGURE 253
#define SN_HWPERF_OP_INVAL 254
int sn_topology_open(struct inode *inode, struct file *file);
int sn_topology_release(struct inode *inode, struct file *file);
#endif /* SN_HWPERF_H */
......@@ -132,6 +132,8 @@
#define SALRET_INVALID_ARG (-2)
#define SALRET_ERROR (-3)
#define SN_SAL_FAKE_PROM 0x02009999
/**
* sn_sal_rev_major - get the major SGI SAL revision number
......@@ -1105,4 +1107,12 @@ ia64_sn_bte_recovery(nasid_t nasid)
return (int) rv.status;
}
static inline int
ia64_sn_is_fake_prom(void)
{
struct ia64_sal_retval rv;
SAL_CALL_NOLOCK(rv, SN_SAL_FAKE_PROM, 0, 0, 0, 0, 0, 0, 0);
return (rv.status == 0);
}
#endif /* _ASM_IA64_SN_SN_SAL_H */
......@@ -201,6 +201,7 @@ tioca_tlbflush(struct tioca_kernel *tioca_kernel)
}
extern uint32_t tioca_gart_found;
extern struct list_head tioca_list;
extern int tioca_init_provider(void);
extern void tioca_fastwrite_enable(struct tioca_kernel *tioca_kern);
#endif /* _ASM_IA64_SN_TIO_CA_AGP_PROVIDER_H */
......@@ -14,7 +14,10 @@
* videoram directly without any black magic.
*/
#define VGA_MAP_MEM(x) ((unsigned long) ioremap((x), 0))
extern unsigned long vga_console_iobase;
extern unsigned long vga_console_membase;
#define VGA_MAP_MEM(x) ((unsigned long) ioremap(vga_console_membase + (x), 0))
#define vga_readb(x) (*(x))
#define vga_writeb(x,y) (*(y) = (x))
......