Commit fd402909 authored by Anton Blanchard

Merge samba.org:/scratch/anton/linux-2.5

into samba.org:/scratch/anton/linux-2.5_ppc64
parents 22775da0 95cba9be
......@@ -18,7 +18,8 @@ KERNELLOAD =0xc000000000000000
LINKFLAGS = -T arch/ppc64/vmlinux.lds -Bstatic \
-e $(KERNELLOAD) -Ttext $(KERNELLOAD)
CFLAGS := $(CFLAGS) -fsigned-char -msoft-float -pipe \
-Wno-uninitialized -mminimal-toc -mtraceback=full
-Wno-uninitialized -mminimal-toc -mtraceback=full \
-Wa,-mpower4 -finline-limit-2000
CPP = $(CC) -E $(CFLAGS)
......
......@@ -153,7 +153,7 @@ make_bi_recs(unsigned long addr)
rec = bi_rec_alloc(rec, 2);
rec->tag = BI_MACHTYPE;
rec->data[0] = _MACH_pSeries;
rec->data[0] = PLATFORM_PSERIES;
rec->data[1] = 1;
if ( initrd_size > 0 ) {
......
......@@ -5,7 +5,6 @@
define_bool CONFIG_UID16 n
define_bool CONFIG_RWSEM_GENERIC_SPINLOCK n
define_bool CONFIG_RWSEM_XCHGADD_ALGORITHM y
define_bool CONFIG_GENERIC_BUST_SPINLOCK n
define_bool CONFIG_GENERIC_ISA_DMA y
define_bool CONFIG_HAVE_DEC_LOCK y
......@@ -30,8 +29,6 @@ define_bool CONFIG_PREEMPT n
if [ "$CONFIG_PPC_ISERIES" = "y" ]; then
define_bool CONFIG_MSCHUNKS y
else
bool 'MsChunks Physical to Absolute address translation support' CONFIG_MSCHUNKS
fi
endmenu
......@@ -103,8 +100,12 @@ if [ "$CONFIG_SCSI" != "n" ]; then
fi
endmenu
source drivers/message/fusion/Config.in
source drivers/ieee1394/Config.in
source drivers/message/i2o/Config.in
if [ "$CONFIG_NET" = "y" ]; then
mainmenu_option next_comment
comment 'Network device support'
......@@ -181,6 +182,9 @@ if [ "$CONFIG_VIOCD" = "y" ]; then
fi
source drivers/char/Config.in
source drivers/media/Config.in
source fs/Config.in
mainmenu_option next_comment
......@@ -194,13 +198,21 @@ endmenu
source drivers/usb/Config.in
source net/bluetooth/Config.in
mainmenu_option next_comment
comment 'Kernel hacking'
bool 'Magic SysRq key' CONFIG_MAGIC_SYSRQ
bool 'Include kgdb kernel debugger' CONFIG_KGDB
bool 'Include xmon kernel debugger' CONFIG_XMON
bool 'Include PPCDBG realtime debugging' CONFIG_PPCDBG
bool 'Kernel debugging' CONFIG_DEBUG_KERNEL
if [ "$CONFIG_DEBUG_KERNEL" != "n" ]; then
bool ' Debug memory allocations' CONFIG_DEBUG_SLAB
bool ' Magic SysRq key' CONFIG_MAGIC_SYSRQ
bool ' Include xmon kernel debugger' CONFIG_XMON
if [ "$CONFIG_XMON" = "y" ]; then
bool ' Enable xmon by default' CONFIG_XMON_DEFAULT
fi
bool ' Include PPCDBG realtime debugging' CONFIG_PPCDBG
fi
endmenu
source lib/Config.in
......@@ -4,7 +4,6 @@
# CONFIG_UID16 is not set
# CONFIG_RWSEM_GENERIC_SPINLOCK is not set
CONFIG_RWSEM_XCHGADD_ALGORITHM=y
# CONFIG_GENERIC_BUST_SPINLOCK is not set
CONFIG_GENERIC_ISA_DMA=y
CONFIG_HAVE_DEC_LOCK=y
......@@ -38,9 +37,7 @@ CONFIG_PPC64=y
CONFIG_SMP=y
CONFIG_IRQ_ALL_CPUS=y
# CONFIG_HMT is not set
# CONFIG_PPC_EEH is not set
# CONFIG_PREEMPT is not set
# CONFIG_MSCHUNKS is not set
#
# General setup
......@@ -133,6 +130,11 @@ CONFIG_IPV6=m
#
# CONFIG_IPX is not set
# CONFIG_ATALK is not set
#
# Appletalk devices
#
# CONFIG_DEV_APPLETALK is not set
# CONFIG_DECNET is not set
# CONFIG_BRIDGE is not set
# CONFIG_X25 is not set
......@@ -153,7 +155,6 @@ CONFIG_IPV6=m
# ATA/IDE/MFM/RLL support
#
# CONFIG_IDE is not set
# CONFIG_BLK_DEV_IDE_MODES is not set
# CONFIG_BLK_DEV_HD is not set
#
......@@ -177,6 +178,7 @@ CONFIG_CHR_DEV_SG=y
# Some SCSI devices (e.g. CD jukebox) support multiple LUNs
#
# CONFIG_SCSI_MULTI_LUN is not set
# CONFIG_SCSI_REPORT_LUNS is not set
# CONFIG_SCSI_CONSTANTS is not set
# CONFIG_SCSI_LOGGING is not set
......@@ -232,11 +234,26 @@ CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64
# CONFIG_SCSI_MESH is not set
# CONFIG_SCSI_MAC53C94 is not set
#
# Fusion MPT device support
#
# CONFIG_FUSION is not set
#
# IEEE 1394 (FireWire) support (EXPERIMENTAL)
#
# CONFIG_IEEE1394 is not set
#
# I2O device support
#
# CONFIG_I2O is not set
# CONFIG_I2O_PCI is not set
# CONFIG_I2O_BLOCK is not set
# CONFIG_I2O_LAN is not set
# CONFIG_I2O_SCSI is not set
# CONFIG_I2O_PROC is not set
#
# Network device support
#
......@@ -282,12 +299,9 @@ CONFIG_PCNET32=y
# CONFIG_ADAPTEC_STARFIRE is not set
# CONFIG_APRICOT is not set
# CONFIG_CS89x0 is not set
# CONFIG_DE2104X is not set
# CONFIG_TULIP is not set
# CONFIG_DE4X5 is not set
# CONFIG_DGRS is not set
# CONFIG_DM9102 is not set
CONFIG_EEPRO100=y
# CONFIG_E100 is not set
# CONFIG_LNE390 is not set
# CONFIG_FEALNX is not set
# CONFIG_NATSEMI is not set
......@@ -306,20 +320,21 @@ CONFIG_EEPRO100=y
# CONFIG_TLAN is not set
# CONFIG_VIA_RHINE is not set
# CONFIG_VIA_RHINE_MMIO is not set
# CONFIG_WINBOND_840 is not set
# CONFIG_NET_POCKET is not set
#
# Ethernet (1000 Mbit)
#
CONFIG_ACENIC=y
# CONFIG_ACENIC_OMIT_TIGON_I is not set
CONFIG_ACENIC_OMIT_TIGON_I=y
# CONFIG_DL2K is not set
# CONFIG_E1000 is not set
# CONFIG_MYRI_SBUS is not set
# CONFIG_NS83820 is not set
# CONFIG_HAMACHI is not set
# CONFIG_YELLOWFIN is not set
# CONFIG_SK98LIN is not set
# CONFIG_TIGON3 is not set
# CONFIG_FDDI is not set
# CONFIG_HIPPI is not set
# CONFIG_PLIP is not set
......@@ -337,6 +352,7 @@ CONFIG_ACENIC=y
CONFIG_TR=y
CONFIG_IBMOL=y
# CONFIG_IBMLS is not set
# CONFIG_3C359 is not set
# CONFIG_TMS380TR is not set
# CONFIG_NET_FC is not set
# CONFIG_RCPCI is not set
......@@ -347,6 +363,11 @@ CONFIG_IBMOL=y
#
# CONFIG_WAN is not set
#
# "Tulip" family network device support
#
# CONFIG_NET_TULIP is not set
#
# Amateur Radio support
#
......@@ -360,7 +381,7 @@ CONFIG_IBMOL=y
#
# ISDN subsystem
#
# CONFIG_ISDN is not set
# CONFIG_ISDN_BOOL is not set
#
# Old CD-ROM drivers (not SCSI, not IDE)
......@@ -414,6 +435,25 @@ CONFIG_FONT_8x16=y
# CONFIG_FONT_SUN8x16 is not set
# CONFIG_FONT_PEARL_8x8 is not set
# CONFIG_FONT_ACORN_8x8 is not set
#
# Input device support
#
# CONFIG_INPUT is not set
# CONFIG_INPUT_KEYBDEV is not set
# CONFIG_INPUT_MOUSEDEV is not set
# CONFIG_INPUT_JOYDEV is not set
# CONFIG_INPUT_EVDEV is not set
# CONFIG_GAMEPORT is not set
CONFIG_SOUND_GAMEPORT=y
# CONFIG_GAMEPORT_NS558 is not set
# CONFIG_GAMEPORT_L4 is not set
# CONFIG_INPUT_EMU10K1 is not set
# CONFIG_GAMEPORT_PCIGAME is not set
# CONFIG_GAMEPORT_FM801 is not set
# CONFIG_GAMEPORT_CS461x is not set
# CONFIG_SERIO is not set
# CONFIG_SERIO_SERPORT is not set
CONFIG_VIOPATH=y
#
......@@ -460,6 +500,11 @@ CONFIG_PSMOUSE=y
# CONFIG_AGP is not set
# CONFIG_DRM is not set
#
# Multimedia devices
#
# CONFIG_VIDEO_DEV is not set
#
# File systems
#
......@@ -490,6 +535,9 @@ CONFIG_RAMFS=y
CONFIG_ISO9660_FS=y
# CONFIG_JOLIET is not set
# CONFIG_ZISOFS is not set
CONFIG_JFS_FS=y
# CONFIG_JFS_DEBUG is not set
# CONFIG_JFS_STATISTICS is not set
# CONFIG_MINIX_FS is not set
# CONFIG_VXFS_FS is not set
# CONFIG_NTFS_FS is not set
......@@ -520,9 +568,11 @@ CONFIG_NFS_V3=y
# CONFIG_ROOT_NFS is not set
CONFIG_NFSD=y
CONFIG_NFSD_V3=y
# CONFIG_NFSD_TCP is not set
CONFIG_SUNRPC=y
CONFIG_LOCKD=y
CONFIG_LOCKD_V4=y
CONFIG_EXPORTFS=y
CONFIG_SMB_FS=y
# CONFIG_SMB_NLS_DEFAULT is not set
# CONFIG_NCP_FS is not set
......@@ -569,6 +619,7 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_NLS_CODEPAGE_949 is not set
# CONFIG_NLS_CODEPAGE_874 is not set
# CONFIG_NLS_ISO8859_8 is not set
# CONFIG_NLS_CODEPAGE_1250 is not set
# CONFIG_NLS_CODEPAGE_1251 is not set
# CONFIG_NLS_ISO8859_1 is not set
# CONFIG_NLS_ISO8859_2 is not set
......@@ -596,119 +647,18 @@ CONFIG_NLS_DEFAULT="iso8859-1"
# CONFIG_USB is not set
#
# USB Host Controller Drivers
#
# CONFIG_USB_EHCI_HCD is not set
# CONFIG_USB_OHCI_HCD is not set
# CONFIG_USB_UHCI is not set
# CONFIG_USB_UHCI_ALT is not set
# CONFIG_USB_OHCI is not set
#
# USB Device Class drivers
#
# CONFIG_USB_AUDIO is not set
# CONFIG_USB_BLUETOOTH is not set
# CONFIG_USB_STORAGE is not set
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_DATAFAB is not set
# CONFIG_USB_STORAGE_FREECOM is not set
# CONFIG_USB_STORAGE_ISD200 is not set
# CONFIG_USB_STORAGE_DPCM is not set
# CONFIG_USB_STORAGE_HP8200e is not set
# CONFIG_USB_STORAGE_SDDR09 is not set
# CONFIG_USB_STORAGE_JUMPSHOT is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
#
# USB Human Interface Devices (HID)
#
# CONFIG_USB_HID is not set
# CONFIG_USB_HIDDEV is not set
# CONFIG_USB_KBD is not set
# CONFIG_USB_MOUSE is not set
# CONFIG_USB_WACOM is not set
#
# USB Imaging devices
#
# CONFIG_USB_DC2XX is not set
# CONFIG_USB_MDC800 is not set
# CONFIG_USB_SCANNER is not set
# CONFIG_USB_MICROTEK is not set
# CONFIG_USB_HPUSBSCSI is not set
#
# USB Multimedia devices
#
# CONFIG_USB_IBMCAM is not set
# CONFIG_USB_OV511 is not set
# CONFIG_USB_PWC is not set
# CONFIG_USB_SE401 is not set
# CONFIG_USB_STV680 is not set
# CONFIG_USB_VICAM is not set
# CONFIG_USB_DSBR is not set
# CONFIG_USB_DABUSB is not set
# CONFIG_USB_KONICAWC is not set
#
# USB Network adaptors
#
# CONFIG_USB_PEGASUS is not set
# CONFIG_USB_KAWETH is not set
# CONFIG_USB_CATC is not set
# CONFIG_USB_CDCETHER is not set
# CONFIG_USB_USBNET is not set
#
# USB port drivers
#
# CONFIG_USB_USS720 is not set
#
# USB Serial Converter support
#
# CONFIG_USB_SERIAL is not set
# CONFIG_USB_SERIAL_GENERIC is not set
# CONFIG_USB_SERIAL_BELKIN is not set
# CONFIG_USB_SERIAL_WHITEHEAT is not set
# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set
# CONFIG_USB_SERIAL_EMPEG is not set
# CONFIG_USB_SERIAL_FTDI_SIO is not set
# CONFIG_USB_SERIAL_VISOR is not set
# CONFIG_USB_SERIAL_IPAQ is not set
# CONFIG_USB_SERIAL_IR is not set
# CONFIG_USB_SERIAL_EDGEPORT is not set
# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set
# CONFIG_USB_SERIAL_KEYSPAN is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28 is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28X is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28XA is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA28XB is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA19 is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA18X is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA19W is not set
# CONFIG_USB_SERIAL_KEYSPAN_USA49W is not set
# CONFIG_USB_SERIAL_MCT_U232 is not set
# CONFIG_USB_SERIAL_KLSI is not set
# CONFIG_USB_SERIAL_PL2303 is not set
# CONFIG_USB_SERIAL_CYBERJACK is not set
# CONFIG_USB_SERIAL_XIRCOM is not set
# CONFIG_USB_SERIAL_OMNINET is not set
#
# USB Miscellaneous drivers
# Bluetooth support
#
# CONFIG_USB_RIO500 is not set
# CONFIG_USB_AUERSWALD is not set
# CONFIG_BLUEZ is not set
#
# Kernel hacking
#
CONFIG_DEBUG_KERNEL=y
# CONFIG_DEBUG_SLAB is not set
CONFIG_MAGIC_SYSRQ=y
# CONFIG_KGDB is not set
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
# CONFIG_PPCDBG is not set
#
......
......@@ -32,8 +32,6 @@ obj-$(CONFIG_PCI) += pSeries_pci.o pSeries_lpar.o pSeries_hvCall.o eeh.o
obj-y += rtasd.o nvram.o
endif
obj-$(CONFIG_KGDB) += ppc-stub.o
obj-$(CONFIG_SMP) += smp.o
obj-y += prom.o lmb.o rtas.o rtas-proc.o chrp_setup.o i8259.o
......
......@@ -56,7 +56,6 @@ show_syscalls_task:
* Handle a system call.
*/
_GLOBAL(DoSyscall)
std r0,THREAD+LAST_SYSCALL(r13)
ld r11,_CCR(r1) /* Clear SO bit in CR */
lis r10,0x1000
andc r11,r11,r10
......
......@@ -84,6 +84,8 @@ int cpu_idle(void)
lpaca = get_paca();
while (1) {
irq_stat[smp_processor_id()].idle_timestamp = jiffies;
if (lpaca->xLpPaca.xSharedProc) {
if (ItLpQueue_isLpIntPending(lpaca->lpQueuePtr))
process_iSeries_events();
......@@ -123,6 +125,7 @@ int cpu_idle(void)
long oldval;
while (1) {
irq_stat[smp_processor_id()].idle_timestamp = jiffies;
oldval = test_and_clear_thread_flag(TIF_NEED_RESCHED);
if (!oldval) {
......@@ -146,3 +149,8 @@ int cpu_idle(void)
}
#endif /* CONFIG_PPC_ISERIES */
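/* Fallback idle action: nothing to do here beyond a compiler barrier. */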
void default_idle(void)
{
barrier();
}
......@@ -3756,6 +3756,7 @@ COMPATIBLE_IOCTL(TCSETSW),
COMPATIBLE_IOCTL(TCSETSF),
COMPATIBLE_IOCTL(TIOCLINUX),
COMPATIBLE_IOCTL(TIOCSTART),
COMPATIBLE_IOCTL(TIOCSTOP),
/* Little t */
COMPATIBLE_IOCTL(TIOCGETD),
COMPATIBLE_IOCTL(TIOCSETD),
......@@ -4336,8 +4337,6 @@ COMPATIBLE_IOCTL(RNDCLEARPOOL),
COMPATIBLE_IOCTL(HCIDEVUP),
COMPATIBLE_IOCTL(HCIDEVDOWN),
COMPATIBLE_IOCTL(HCIDEVRESET),
COMPATIBLE_IOCTL(HCIRESETSTAT),
COMPATIBLE_IOCTL(HCIGETINFO),
COMPATIBLE_IOCTL(HCIGETDEVLIST),
COMPATIBLE_IOCTL(HCISETRAW),
COMPATIBLE_IOCTL(HCISETSCAN),
......
......@@ -75,22 +75,6 @@ irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned =
int ppc_spurious_interrupts = 0;
struct irqaction *ppc_irq_action[NR_IRQS];
unsigned long lpEvent_count = 0;
#ifdef CONFIG_XMON
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
#ifdef CONFIG_XMON
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
extern int (*debugger_iabr_match)(struct pt_regs *regs);
extern int (*debugger_dabr_match)(struct pt_regs *regs);
extern void (*debugger_fault_handler)(struct pt_regs *regs);
#endif
/* nasty hack for shared irq's since we need to do kmalloc calls but
* can't very early in the boot when we need to do a request irq.
......@@ -410,6 +394,75 @@ handle_irq_event(int irq, struct pt_regs *regs, struct irqaction *action)
__cli();
}
#ifdef CONFIG_SMP
extern unsigned int irq_affinity [NR_IRQS];
typedef struct {
unsigned long cpu;
unsigned long timestamp;
} ____cacheline_aligned irq_balance_t;
static irq_balance_t irq_balance[NR_IRQS] __cacheline_aligned
= { [ 0 ... NR_IRQS-1 ] = { 1, 0 } };
#define IDLE_ENOUGH(cpu,now) \
(idle_cpu(cpu) && ((now) - irq_stat[(cpu)].idle_timestamp > ((HZ/100)+1)))
#define IRQ_ALLOWED(cpu,allowed_mask) \
((1 << cpu) & (allowed_mask))
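/*
 * Walk the cpus in the given direction, restricted to allowed_mask;
 * prefer a cpu that has been idle long enough, and drop that
 * requirement once the search wraps back to the starting cpu.
 */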
static unsigned long move(unsigned long curr_cpu, unsigned long allowed_mask,
unsigned long now, int direction)
{
int search_idle = 1;
int cpu = curr_cpu;
goto inside;
do {
if (unlikely(cpu == curr_cpu))
search_idle = 0;
inside:
if (direction == 1) {
cpu++;
if (cpu >= smp_num_cpus)
cpu = 0;
} else {
cpu--;
if (cpu == -1)
cpu = smp_num_cpus-1;
}
} while (!IRQ_ALLOWED(cpu,allowed_mask) ||
(search_idle && !IDLE_ENOUGH(cpu,now)));
return cpu;
}
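/*
 * At most once per jiffy, retarget this irq to a pseudo-randomly chosen
 * neighbouring cpu from the allowed mask via the handler's set_affinity hook.
 */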
static inline void balance_irq(int irq)
{
irq_balance_t *entry = irq_balance + irq;
unsigned long now = jiffies;
if (unlikely(entry->timestamp != now)) {
unsigned long allowed_mask;
unsigned long random_number;
if (!irq_desc[irq].handler->set_affinity)
return;
random_number = mftb();
random_number &= 1;
allowed_mask = cpu_online_map & irq_affinity[irq];
entry->timestamp = now;
entry->cpu = move(entry->cpu, allowed_mask, now, random_number);
irq_desc[irq].handler->set_affinity(irq, 1 << entry->cpu);
}
}
#else
#define balance_irq(irq) do { } while (0)
#endif
/*
* Eventually, this should take an array of interrupts and an array size
* so it can dispatch multiple interrupts.
......@@ -421,6 +474,8 @@ void ppc_irq_dispatch_handler(struct pt_regs *regs, int irq)
int cpu = smp_processor_id();
irq_desc_t *desc = irq_desc + irq;
balance_irq(irq);
kstat.irqs[cpu][irq]++;
spin_lock(&desc->lock);
ack_irq(irq);
......
......@@ -265,24 +265,6 @@ _GLOBAL(__flush_dcache_icache)
isync
blr
/*
* Copy a whole page. Assumes a 4096B page size.
*/
_GLOBAL(copy_page)
clrrdi r3,r3,12 /* Page align */
clrrdi r4,r4,12 /* Page align */
li r5,256
mtctr r5
addi r3,r3,-8
addi r4,r4,-8
1: ld r6,8(r4)
ldu r7,16(r4)
std r6,8(r3)
stdu r7,16(r3)
bdnz+ 1b
blr
/*
* I/O string operations
*
......@@ -649,7 +631,7 @@ _GLOBAL(sys_call_table32)
.llong .sys32_init_module
.llong .sys32_delete_module
.llong .sys32_get_kernel_syms /* 130 */
.llong .sys32_quotactl
.llong .sys_quotactl
.llong .sys32_getpgid
.llong .sys_fchdir
.llong .sys32_bdflush
......@@ -740,7 +722,11 @@ _GLOBAL(sys_call_table32)
.llong .sys_lremovexattr
.llong .sys_fremovexattr /* 220 */
.llong .sys_futex
.rept NR_syscalls-221
.llong .sys_ni_syscall /* reserved for tux */
.llong .sys32_sched_setaffinity
.llong .sys32_sched_getaffinity
.rept NR_syscalls-224
.llong .sys_ni_syscall
.endr
#endif
......@@ -969,6 +955,10 @@ _GLOBAL(sys_call_table)
.llong .sys_lremovexattr
.llong .sys_fremovexattr /* 220 */
.llong .sys_futex
.rept NR_syscalls-221
.llong .sys_ni_syscall /* reserved for tux */
.llong .sys_sched_setaffinity
.llong .sys_sched_getaffinity
.rept NR_syscalls-224
.llong .sys_ni_syscall
.endr
......@@ -52,7 +52,6 @@ main(void)
/* task_struct->thread */
DEFINE(THREAD, offsetof(struct task_struct, thread));
DEFINE(LAST_SYSCALL, offsetof(struct thread_struct, last_syscall));
DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
......
......@@ -359,7 +359,7 @@ static void pSeries_hpte_invalidate(unsigned long slot, unsigned long va,
/* Invalidate the tlb */
if (!large && local && __is_processor(PV_POWER4)) {
_tlbiel(va, large);
_tlbiel(va);
} else {
spin_lock_irqsave(&pSeries_tlbie_lock, flags);
_tlbie(va, large);
......
......@@ -498,6 +498,8 @@ pcibios_init(void)
}
subsys_initcall(pcibios_init);
int __init
pcibios_assign_all_busses(void)
{
......
......@@ -257,7 +257,7 @@ EXPORT_SYMBOL(console_drivers);
EXPORT_SYMBOL(xmon);
#endif
#if defined(CONFIG_KGDB) || defined(CONFIG_XMON)
#ifdef CONFIG_DEBUG_KERNEL
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
......
......@@ -117,7 +117,6 @@ void show_regs(struct pt_regs * regs)
regs->msr&MSR_DR ? 1 : 0);
printk("TASK = %p[%d] '%s' ",
current, current->pid, current->comm);
printk("Last syscall: %ld ", current->thread.last_syscall);
printk("\nlast math %p ", last_task_used_math);
#ifdef CONFIG_SMP
......@@ -223,8 +222,6 @@ copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
memcpy(&p->thread.fpr, &current->thread.fpr, sizeof(p->thread.fpr));
p->thread.fpscr = current->thread.fpscr;
p->thread.last_syscall = -1;
return 0;
}
......
......@@ -878,7 +878,7 @@ prom_initialize_tce_table(void)
phandle node;
ihandle phb_node;
unsigned long offset = reloc_offset();
char compatible[64], path[64], type[64];
char compatible[64], path[64], type[64], model[64];
unsigned long i, table = 0;
unsigned long base, vbase, align;
unsigned int minalign, minsize;
......@@ -893,16 +893,25 @@ prom_initialize_tce_table(void)
for (node = 0; prom_next_node(&node); ) {
compatible[0] = 0;
type[0] = 0;
model[0] = 0;
call_prom(RELOC("getprop"), 4, 1, node, RELOC("compatible"),
compatible, sizeof(compatible));
call_prom(RELOC("getprop"), 4, 1, node, RELOC("device_type"),
type, sizeof(type));
call_prom(RELOC("getprop"), 4, 1, node, RELOC("model"),
model, sizeof(model));
if ((compatible[0] == 0) ||
((strstr(compatible, RELOC("python")) == NULL) &&
(strstr(compatible, RELOC("Speedwagon")) == NULL))) {
/* Keep the old logic intact to avoid regression. */
if (compatible[0] != 0) {
if((strstr(compatible, RELOC("python")) == NULL) &&
(strstr(compatible, RELOC("Speedwagon")) == NULL))
continue;
} else if (model[0] != 0) {
if ((strstr(model, RELOC("ython")) == NULL) &&
(strstr(model, RELOC("peedwagon")) == NULL))
continue;
}
if ((type[0] == 0) || (strstr(type, RELOC("pci")) == NULL)) {
continue;
}
......
......@@ -130,6 +130,14 @@ void setup_system(unsigned long r3, unsigned long r4, unsigned long r5,
/* This should be fixed properly in kernel/resource.c */
iomem_resource.end = MEM_SPACE_LIMIT;
#ifdef CONFIG_XMON_DEFAULT
debugger = xmon;
debugger_bpt = xmon_bpt;
debugger_sstep = xmon_sstep;
debugger_iabr_match = xmon_iabr_match;
debugger_dabr_match = xmon_dabr_match;
#endif
/* pSeries systems are identified in prom.c via OF. */
if ( itLpNaca.xLparInstalled == 1 )
naca->platform = PLATFORM_ISERIES_LPAR;
......@@ -507,11 +515,6 @@ void __init setup_arch(char **cmdline_p)
ppc_md.progress("setup_arch:enter", 0x3eab);
#if defined(CONFIG_KGDB)
kgdb_map_scc();
set_debug_traps();
breakpoint();
#endif
/*
* Set cache line size based on type of cpu as a default.
* Systems with OF can look in the properties on the cpu node(s)
......@@ -543,26 +546,6 @@ void __init setup_arch(char **cmdline_p)
ppc_md.progress("setup_arch: exit", 0x3eab);
}
void exception_trace(unsigned long trap)
{
unsigned long x, srr0, srr1, reg20, reg1, reg21;
asm("mflr %0" : "=r" (x) :);
asm("mfspr %0,0x1a" : "=r" (srr0) :);
asm("mfspr %0,0x1b" : "=r" (srr1) :);
asm("mr %0,1" : "=r" (reg1) :);
asm("mr %0,20" : "=r" (reg20) :);
asm("mr %0,21" : "=r" (reg21) :);
udbg_puts("\n");
udbg_puts("Took an exception : "); udbg_puthex(x); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(reg1); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(reg20); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(reg21); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(srr0); udbg_puts("\n");
udbg_puts(" "); udbg_puthex(srr1); udbg_puts("\n");
}
int set_spread_lpevents( char * str )
{
/* The parameter is the number of processors to share in processing lp events */
......
......@@ -31,6 +31,7 @@
/* #include <linux/openpic.h> */
#include <linux/spinlock.h>
#include <linux/cache.h>
#include <linux/err.h>
#include <asm/ptrace.h>
#include <asm/atomic.h>
......@@ -58,7 +59,6 @@ volatile int smp_commenced = 0;
int smp_num_cpus = 1;
int smp_tb_synchronized = 0;
spinlock_t kernel_flag __cacheline_aligned = SPIN_LOCK_UNLOCKED;
cycles_t cacheflush_time;
unsigned long cache_decay_ticks;
static int max_cpus __initdata = NR_CPUS;
......@@ -595,12 +595,13 @@ void __init smp_boot_cpus(void)
}
/*
* XXX very rough, assumes 20 bus cycles to read a cache line,
* timebase increments every 4 bus cycles, 32kB L1 data cache.
* XXX very rough. On POWER4 we optimise tlb flushes for
* tasks that only run on one cpu so we increase decay ticks.
*/
cacheflush_time = 5 * 1024;
/* XXX - Fix - Anton */
cache_decay_ticks = 0;
if (__is_processor(PV_POWER4))
cache_decay_ticks = HZ/50;
else
cache_decay_ticks = HZ/100;
/* Probe arch for CPUs */
cpu_nr = ppc_md.smp_probe();
......
......@@ -38,7 +38,6 @@
#include <linux/smb_fs.h>
#include <linux/smb_mount.h>
#include <linux/ncp_fs.h>
#include <linux/quota.h>
#include <linux/module.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
......@@ -66,6 +65,7 @@
#include <asm/ppcdebug.h>
#include <asm/time.h>
#include <asm/ppc32.h>
#include <asm/mmu_context.h>
extern unsigned long wall_jiffies;
#define USEC_PER_SEC (1000000)
......@@ -518,68 +518,6 @@ struct dqblk32 {
__kernel_time_t32 dqb_itime;
};
extern asmlinkage long sys_quotactl(int cmd, const char *special, int id, caddr_t addr);
/* Note: it is necessary to treat cmd and id as unsigned ints,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
* and the register representation of a signed int (msr in 64-bit mode) is performed.
*/
asmlinkage long sys32_quotactl(u32 cmd_parm, const char *special, u32 id_parm, unsigned long addr)
{
int cmd = (int)cmd_parm;
int id = (int)id_parm;
int cmds = cmd >> SUBCMDSHIFT;
int err;
struct dqblk d;
mm_segment_t old_fs;
char *spec;
PPCDBG(PPCDBG_SYS32, "sys32_quotactl - entered - pid=%ld current=%lx comm=%s \n",
current->pid, current, current->comm);
switch (cmds) {
case Q_GETQUOTA:
break;
case Q_SETQUOTA:
case Q_SETUSE:
case Q_SETQLIM:
if (copy_from_user (&d, (struct dqblk32 *)addr,
sizeof (struct dqblk32)))
return -EFAULT;
d.dqb_itime = ((struct dqblk32 *)&d)->dqb_itime;
d.dqb_btime = ((struct dqblk32 *)&d)->dqb_btime;
break;
default:
return sys_quotactl(cmd, special,
id, (caddr_t)addr);
}
spec = getname32 (special);
err = PTR_ERR(spec);
if (IS_ERR(spec)) return err;
old_fs = get_fs ();
set_fs (KERNEL_DS);
err = sys_quotactl(cmd, (const char *)spec, id, (caddr_t)&d);
set_fs (old_fs);
putname (spec);
if (cmds == Q_GETQUOTA) {
__kernel_time_t b = d.dqb_btime, i = d.dqb_itime;
((struct dqblk32 *)&d)->dqb_itime = i;
((struct dqblk32 *)&d)->dqb_btime = b;
if (copy_to_user ((struct dqblk32 *)addr, &d,
sizeof (struct dqblk32)))
return -EFAULT;
}
PPCDBG(PPCDBG_SYS32, "sys32_quotactl - exited - pid=%ld current=%lx comm=%s \n",
current->pid, current, current->comm);
return err;
}
/* readdir & getdents */
#define NAME_OFFSET(de) ((int) ((de)->d_name - (char *) (de)))
#define ROUND_UP(x) (((x)+sizeof(u32)-1) & ~(sizeof(u32)-1))
......@@ -900,15 +838,6 @@ asmlinkage long sys32_select(int n, u32 *inp, u32 *outp, u32 *exp, u32 tvp_x)
return ret;
}
/*
* Due to some executables calling the wrong select we sometimes
* get wrong args. This determines how the args are being passed
* (a single ptr to them all args passed) then calls
* sys_select() with the appropriate args. -- Cort
*/
/* Note: it is necessary to treat n as an unsigned int,
* with the corresponding cast to a signed int to insure that the
* proper conversion (sign extension) between the register representation of a signed int (msr in 32-bit mode)
......@@ -916,14 +845,9 @@ asmlinkage long sys32_select(int n, u32 *inp, u32 *outp, u32 *exp, u32 tvp_x)
*/
asmlinkage int ppc32_select(u32 n, u32* inp, u32* outp, u32* exp, u32 tvp_x)
{
if ((unsigned int)n >= 4096)
panic("ppc32_select - wrong arguments were passed in \n");
return sys32_select((int)n, inp, outp, exp, tvp_x);
}
static int cp_new_stat32(struct kstat *stat, struct stat32 *statbuf)
{
int err;
......@@ -3800,30 +3724,37 @@ static int do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs *
int retval;
int i;
bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
file = open_exec(filename);
retval = PTR_ERR(file);
if (IS_ERR(file))
return retval;
bprm.p = PAGE_SIZE*MAX_ARG_PAGES-sizeof(void *);
memset(bprm.page, 0, MAX_ARG_PAGES * sizeof(bprm.page[0]));
bprm.file = file;
bprm.filename = filename;
bprm.sh_bang = 0;
bprm.loader = 0;
bprm.exec = 0;
if ((bprm.argc = count32(argv, bprm.p / sizeof(u32))) < 0) {
allow_write_access(file);
fput(file);
return bprm.argc;
}
if ((bprm.envc = count32(envp, bprm.p / sizeof(u32))) < 0) {
allow_write_access(file);
fput(file);
return bprm.argc;
}
bprm.mm = mm_alloc();
retval = -ENOMEM;
if (!bprm.mm)
goto out_file;
retval = init_new_context(current, bprm.mm);
if (retval < 0)
goto out_mm;
bprm.argc = count32(argv, bprm.p / sizeof(u32));
if ((retval = bprm.argc) < 0)
goto out_mm;
bprm.envc = count32(envp, bprm.p / sizeof(u32));
if ((retval = bprm.envc) < 0)
goto out_mm;
retval = prepare_binprm(&bprm);
if (retval < 0)
......@@ -3842,21 +3773,27 @@ static int do_execve32(char * filename, u32 * argv, u32 * envp, struct pt_regs *
if (retval < 0)
goto out;
retval = search_binary_handler(&bprm, regs);
retval = search_binary_handler(&bprm,regs);
if (retval >= 0)
/* execve success */
return retval;
out:
/* Something went wrong, return the inode and free the argument pages*/
allow_write_access(bprm.file);
if (bprm.file)
fput(bprm.file);
for (i = 0 ; i < MAX_ARG_PAGES ; i++) {
struct page * page = bprm.page[i];
if (page)
__free_page(page);
}
for (i=0 ; i<MAX_ARG_PAGES ; i++)
if (bprm.page[i])
__free_page(bprm.page[i]);
out_mm:
mmdrop(bprm.mm);
out_file:
if (bprm.file) {
allow_write_access(bprm.file);
fput(bprm.file);
}
return retval;
}
......@@ -3867,11 +3804,6 @@ asmlinkage long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a
int error;
char * filename;
ifppcdebug(PPCDBG_SYS32) {
udbg_printf("sys32_execve - entered - pid=%ld, comm=%s \n", current->pid, current->comm);
//PPCDBG(PPCDBG_SYS32NI, " a0=%lx, a1=%lx, a2=%lx, a3=%lx, a4=%lx, a5=%lx, regs=%p \n", a0, a1, a2, a3, a4, a5, regs);
}
filename = getname((char *) a0);
error = PTR_ERR(filename);
if (IS_ERR(filename))
......@@ -3886,10 +3818,6 @@ asmlinkage long sys32_execve(unsigned long a0, unsigned long a1, unsigned long a
putname(filename);
out:
ifppcdebug(PPCDBG_SYS32) {
udbg_printf("sys32_execve - exited - returning %x - pid=%ld \n", error, current->pid);
//udbg_printf("sys32_execve - at exit - regs->gpr[1]=%lx, gpr[3]=%lx, gpr[4]=%lx, gpr[5]=%lx, gpr[6]=%lx \n", regs->gpr[1], regs->gpr[3], regs->gpr[4], regs->gpr[5], regs->gpr[6]);
}
return error;
}
......@@ -4671,3 +4599,53 @@ asmlinkage long sys32_time(__kernel_time_t32* tloc)
return secs;
}
extern asmlinkage int sys_sched_setaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
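/*
 * 32-bit compat wrapper: read the user's 32-bit cpu mask into a kernel
 * unsigned long, then invoke the native syscall under KERNEL_DS.
 */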
asmlinkage int sys32_sched_setaffinity(__kernel_pid_t32 pid, unsigned int len,
u32 *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
if (get_user(kernel_mask, user_mask_ptr))
return -EFAULT;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_setaffinity(pid,
/* XXX Nice api... */
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
return ret;
}
extern asmlinkage int sys_sched_getaffinity(pid_t pid, unsigned int len,
unsigned long *user_mask_ptr);
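/*
 * Compat counterpart: obtain the mask via the native syscall into a kernel
 * buffer, then copy the low 32 bits back out to the 32-bit caller.
 */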
asmlinkage int sys32_sched_getaffinity(__kernel_pid_t32 pid, unsigned int len,
u32 *user_mask_ptr)
{
unsigned long kernel_mask;
mm_segment_t old_fs;
int ret;
old_fs = get_fs();
set_fs(KERNEL_DS);
ret = sys_sched_getaffinity(pid,
/* XXX Nice api... */
sizeof(kernel_mask),
&kernel_mask);
set_fs(old_fs);
if (ret == 0) {
if (put_user(kernel_mask, user_mask_ptr))
ret = -EFAULT;
}
return ret;
}
......@@ -38,29 +38,12 @@
#include <asm/ppcdebug.h>
extern int fix_alignment(struct pt_regs *);
extern void bad_page_fault(struct pt_regs *, unsigned long);
extern void bad_page_fault(struct pt_regs *, unsigned long, int);
/* This is true if we are using the firmware NMI handler (typically LPAR) */
extern int fwnmi_active;
#ifdef CONFIG_XMON
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
#ifdef CONFIG_XMON
void (*debugger)(struct pt_regs *regs) = xmon;
int (*debugger_bpt)(struct pt_regs *regs) = xmon_bpt;
int (*debugger_sstep)(struct pt_regs *regs) = xmon_sstep;
int (*debugger_iabr_match)(struct pt_regs *regs) = xmon_iabr_match;
int (*debugger_dabr_match)(struct pt_regs *regs) = xmon_dabr_match;
void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#ifdef CONFIG_KGDB
#ifdef CONFIG_DEBUG_KERNEL
void (*debugger)(struct pt_regs *regs);
int (*debugger_bpt)(struct pt_regs *regs);
int (*debugger_sstep)(struct pt_regs *regs);
......@@ -68,30 +51,44 @@ int (*debugger_iabr_match)(struct pt_regs *regs);
int (*debugger_dabr_match)(struct pt_regs *regs);
void (*debugger_fault_handler)(struct pt_regs *regs);
#endif
#endif
/*
* Trap & Exception support
*/
void
_exception(int signr, struct pt_regs *regs)
/* Should we panic on bad kernel exceptions or try to recover */
#undef PANIC_ON_ERROR
static spinlock_t die_lock = SPIN_LOCK_UNLOCKED;
void die(const char *str, struct pt_regs *regs, long err)
{
if (!user_mode(regs))
{
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("Oops: %s, sig: %ld\n", str, err);
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
print_backtrace((unsigned long *)regs->gpr[1]);
panic("Exception in kernel pc %lx signal %d",regs->nip,signr);
#if defined(CONFIG_PPCDBG) && (defined(CONFIG_XMON) || defined(CONFIG_KGDB))
/* Allow us to catch SIGILLs for 64-bit app/glibc debugging. -Peter */
} else if (signr == SIGILL) {
ifppcdebug(PPCDBG_SIGNALXMON)
debugger(regs);
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
#ifdef PANIC_ON_ERROR
panic(str);
#else
do_exit(SIGSEGV);
#endif
}
static void
_exception(int signr, siginfo_t *info, struct pt_regs *regs)
{
if (!user_mode(regs)) {
if (debugger)
debugger(regs);
die("Exception in kernel mode\n", regs, signr);
}
force_sig(signr, current);
force_sig_info(signr, info, current);
}
/* Get the error information for errors coming through the
......@@ -130,9 +127,8 @@ static void FWNMI_release_errinfo(void)
void
SystemResetException(struct pt_regs *regs)
{
char *msg = "System Reset in kernel mode.\n";
udbg_printf(msg); printk(msg);
if (fwnmi_active) {
char *msg;
unsigned long *r3 = __va(regs->gpr[3]); /* for FWNMI debug */
struct rtas_error_log *errlog;
......@@ -140,17 +136,31 @@ SystemResetException(struct pt_regs *regs)
udbg_printf(msg, r3); printk(msg, r3);
errlog = FWNMI_get_errinfo(regs);
}
#if defined(CONFIG_XMON)
xmon(regs);
udbg_printf("leaving xmon...\n");
if (debugger)
debugger(regs);
#ifdef PANIC_ON_ERROR
panic("System Reset");
#else
for(;;);
/* Must die if the interrupt is not recoverable */
if (!(regs->msr & MSR_RI))
panic("Unrecoverable System Reset");
#endif
/* What should we do here? We could issue a shutdown or hard reset. */
}
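/* Hook for POWER4-specific machine check recovery; currently reports the error as unhandled. */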
static int power4_handle_mce(struct pt_regs *regs)
{
return 0;
}
void
MachineCheckException(struct pt_regs *regs)
{
siginfo_t info;
if (fwnmi_active) {
struct rtas_error_log *errhdr = FWNMI_get_errinfo(regs);
if (errhdr) {
......@@ -158,117 +168,221 @@ MachineCheckException(struct pt_regs *regs)
}
FWNMI_release_errinfo();
}
if ( !user_mode(regs) )
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (!user_mode(regs)) {
/* Attempt to recover if the interrupt is recoverable */
if (regs->msr & MSR_RI) {
if (__is_processor(PV_POWER4) &&
power4_handle_mce(regs))
return;
}
if (debugger_fault_handler) {
debugger_fault_handler(regs);
return;
}
#endif
if (debugger)
debugger(regs);
console_verbose();
spin_lock_irq(&die_lock);
bust_spinlocks(1);
printk("Machine check in kernel mode.\n");
printk("Caused by (from SRR1=%lx): ", regs->msr);
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
debugger(regs);
#endif
print_backtrace((unsigned long *)regs->gpr[1]);
panic("machine check");
bust_spinlocks(0);
spin_unlock_irq(&die_lock);
panic("Unrecoverable Machine Check");
}
_exception(SIGSEGV, regs);
}
void
SMIException(struct pt_regs *regs)
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
{
debugger(regs);
return;
}
#endif
show_regs(regs);
print_backtrace((unsigned long *)regs->gpr[1]);
panic("System Management Interrupt");
/*
* XXX we should check RI bit on exception exit and kill the
* task if it was cleared
*/
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRERR;
info.si_addr = (void *)regs->nip;
_exception(SIGSEGV, &info, regs);
}
void
UnknownException(struct pt_regs *regs)
{
siginfo_t info;
printk("Bad trap at PC: %lx, SR: %lx, vector=%lx\n",
regs->nip, regs->msr, regs->trap);
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = 0;
info.si_addr = 0;
_exception(SIGTRAP, &info, regs);
}
void
InstructionBreakpointException(struct pt_regs *regs)
{
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_iabr_match(regs))
siginfo_t info;
if (debugger_iabr_match && debugger_iabr_match(regs))
return;
#endif
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
}
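/*
 * Translate the FPSCR enable/status bit pairs into a SIGFPE si_code
 * (invalid operation, overflow, underflow, divide by zero, inexact).
 */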
static void parse_fpe(struct pt_regs *regs)
{
siginfo_t info;
unsigned int *tmp;
unsigned int fpscr;
if (regs->msr & MSR_FP)
giveup_fpu(current);
tmp = &current->thread.fpscr;
fpscr = *tmp;
/* Invalid operation */
if ((fpscr & FPSCR_VE) && (fpscr & FPSCR_VX))
info.si_code = FPE_FLTINV;
/* Overflow */
else if ((fpscr & FPSCR_OE) && (fpscr & FPSCR_OX))
info.si_code = FPE_FLTOVF;
/* Underflow */
else if ((fpscr & FPSCR_UE) && (fpscr & FPSCR_UX))
info.si_code = FPE_FLTUND;
/* Divide by zero */
else if ((fpscr & FPSCR_ZE) && (fpscr & FPSCR_ZX))
info.si_code = FPE_FLTDIV;
/* Inexact result */
else if ((fpscr & FPSCR_XE) && (fpscr & FPSCR_XX))
info.si_code = FPE_FLTRES;
else
info.si_code = 0;
info.si_signo = SIGFPE;
info.si_errno = 0;
info.si_addr = (void *)regs->nip;
_exception(SIGFPE, &info, regs);
}
void
ProgramCheckException(struct pt_regs *regs)
{
siginfo_t info;
if (regs->msr & 0x100000) {
/* IEEE FP exception */
_exception(SIGFPE, regs);
parse_fpe(regs);
} else if (regs->msr & 0x40000) {
/* Privileged instruction */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_PRVOPC;
info.si_addr = (void *)regs->nip;
_exception(SIGILL, &info, regs);
} else if (regs->msr & 0x20000) {
/* trap exception */
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_bpt(regs))
if (debugger_bpt && debugger_bpt(regs))
return;
#endif
_exception(SIGTRAP, regs);
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
} else {
_exception(SIGILL, regs);
/* Illegal instruction */
info.si_signo = SIGILL;
info.si_errno = 0;
info.si_code = ILL_ILLTRP;
info.si_addr = (void *)regs->nip;
_exception(SIGILL, &info, regs);
}
}
void
SingleStepException(struct pt_regs *regs)
{
siginfo_t info;
regs->msr &= ~MSR_SE; /* Turn off 'trace' bit */
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
if (debugger_sstep(regs))
if (debugger_sstep && debugger_sstep(regs))
return;
#endif
_exception(SIGTRAP, regs);
}
/* Dummy handler for Performance Monitor */
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_TRACE;
info.si_addr = (void *)regs->nip;
_exception(SIGTRAP, &info, regs);
}
void
PerformanceMonitorException(struct pt_regs *regs)
{
_exception(SIGTRAP, regs);
siginfo_t info;
info.si_signo = SIGTRAP;
info.si_errno = 0;
info.si_code = TRAP_BRKPT;
info.si_addr = 0;
_exception(SIGTRAP, &info, regs);
}
void
AlignmentException(struct pt_regs *regs)
{
int fixed;
siginfo_t info;
fixed = fix_alignment(regs);
if (fixed == 1) {
ifppcdebug(PPCDBG_ALIGNFIXUP)
if (!user_mode(regs))
PPCDBG(PPCDBG_ALIGNFIXUP, "fix alignment at %lx\n", regs->nip);
PPCDBG(PPCDBG_ALIGNFIXUP, "fix alignment at %lx\n",
regs->nip);
regs->nip += 4; /* skip over emulated instruction */
return;
}
/* Operand address was bad */
if (fixed == -EFAULT) {
/* fixed == -EFAULT means the operand address was bad */
if (user_mode(regs))
force_sig(SIGSEGV, current);
else
bad_page_fault(regs, regs->dar);
if (user_mode(regs)) {
info.si_signo = SIGSEGV;
info.si_errno = 0;
info.si_code = SEGV_MAPERR;
info.si_addr = (void *)regs->dar;
force_sig_info(SIGSEGV, &info, current);
} else {
/* Search exception table */
bad_page_fault(regs, regs->dar, SIGSEGV);
}
return;
}
_exception(SIGBUS, regs);
info.si_signo = SIGBUS;
info.si_errno = 0;
info.si_code = BUS_ADRALN;
info.si_addr = (void *)regs->nip;
_exception(SIGBUS, &info, regs);
}
void __init trap_init(void)
......
......@@ -4,6 +4,7 @@
O_TARGET = lib.o
obj-y := checksum.o dec_and_lock.o string.o strcase.o
obj-y := checksum.o dec_and_lock.o string.o strcase.o copypage.o \
memcpy.o copyuser.o
include $(TOPDIR)/Rules.make
/*
* arch/ppc64/lib/copypage.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include "../kernel/ppc_asm.h"
_GLOBAL(copy_page)
std r31,-8(1)
std r30,-16(1)
std r29,-24(1)
std r28,-32(1)
std r27,-40(1)
std r26,-48(1)
std r25,-56(1)
std r24,-64(1)
std r23,-72(1)
std r22,-80(1)
std r21,-88(1)
std r20,-96(1)
li r5,4096/32 - 1
addi r3,r3,-8
li r12,5
0: addi r5,r5,-24
mtctr r12
ld r22,640(4)
ld r21,512(4)
ld r20,384(4)
ld r11,256(4)
ld r9,128(4)
ld r7,0(4)
ld r25,648(4)
ld r24,520(4)
ld r23,392(4)
ld r10,264(4)
ld r8,136(4)
ldu r6,8(4)
cmpwi r5,24
1: std r22,648(3)
std r21,520(3)
std r20,392(3)
std r11,264(3)
std r9,136(3)
std r7,8(3)
ld r28,648(4)
ld r27,520(4)
ld r26,392(4)
ld r31,264(4)
ld r30,136(4)
ld r29,8(4)
std r25,656(3)
std r24,528(3)
std r23,400(3)
std r10,272(3)
std r8,144(3)
std r6,16(3)
ld r22,656(4)
ld r21,528(4)
ld r20,400(4)
ld r11,272(4)
ld r9,144(4)
ld r7,16(4)
std r28,664(3)
std r27,536(3)
std r26,408(3)
std r31,280(3)
std r30,152(3)
stdu r29,24(3)
ld r25,664(4)
ld r24,536(4)
ld r23,408(4)
ld r10,280(4)
ld r8,152(4)
ldu r6,24(4)
bdnz 1b
std r22,648(3)
std r21,520(3)
std r20,392(3)
std r11,264(3)
std r9,136(3)
std r7,8(3)
addi r4,r4,640
addi r3,r3,648
bge 0b
mtctr r5
ld r7,0(4)
ld r8,8(4)
ldu r9,16(4)
3: ld r10,8(4)
std r7,8(3)
ld r7,16(4)
std r8,16(3)
ld r8,24(4)
std r9,24(3)
ldu r9,32(4)
stdu r10,32(3)
bdnz 3b
4: ld r10,8(4)
std r7,8(3)
std r8,16(3)
std r9,24(3)
std r10,32(3)
9: ld r20,-96(1)
ld r21,-88(1)
ld r22,-80(1)
ld r23,-72(1)
ld r24,-64(1)
ld r25,-56(1)
ld r26,-48(1)
ld r27,-40(1)
ld r28,-32(1)
ld r29,-24(1)
ld r30,-16(1)
ld r31,-8(1)
blr
/*
* arch/ppc64/lib/copyuser.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include "../kernel/ppc_asm.h"
.align 7
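/*
 * __copy_tofrom_user(to r3, from r4, nb r5): returns 0 on success or the
 * number of bytes left uncopied; every load and store is covered by the
 * __ex_table entries below.
 */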
_GLOBAL(__copy_tofrom_user)
/* first check for a whole page copy on a page boundary */
cmpldi cr1,r5,16
cmpdi cr6,r5,4096
or r0,r3,r4
neg r6,r3 /* LS 3 bits = # bytes to 8-byte dest bdry */
andi. r0,r0,4095
std r3,-24(r1)
crand cr0*4+2,cr0*4+2,cr6*4+2
std r4,-16(r1)
std r5,-8(r1)
dcbt 0,r4
beq .Lcopy_page
andi. r6,r6,7
mtcrf 0x01,r5
blt cr1,.Lshort_copy
bne .Ldst_unaligned
.Ldst_aligned:
andi. r0,r4,7
addi r3,r3,-16
bne .Lsrc_unaligned
srdi r7,r5,4
20: ld r9,0(r4)
addi r4,r4,-8
mtctr r7
andi. r5,r5,7
bf cr7*4+0,22f
addi r3,r3,8
addi r4,r4,8
mr r8,r9
blt cr1,72f
21: ld r9,8(r4)
70: std r8,8(r3)
22: ldu r8,16(r4)
71: stdu r9,16(r3)
bdnz 21b
72: std r8,8(r3)
beq+ 3f
addi r3,r3,16
23: ld r9,8(r4)
.Ldo_tail:
bf cr7*4+1,1f
rotldi r9,r9,32
73: stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
rotldi r9,r9,16
74: sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
rotldi r9,r9,8
75: stb r9,0(r3)
3: li r3,0
blr
.Lsrc_unaligned:
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpldi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,28f
24: ld r9,0(r4) /* 3+2n loads, 2+2n stores */
25: ld r0,8(r4)
sld r6,r9,r10
26: ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
or r7,r7,r6
blt cr6,79f
27: ld r0,8(r4)
b 2f
28: ld r0,0(r4) /* 4+2n loads, 3+2n stores */
29: ldu r9,8(r4)
sld r8,r0,r10
addi r3,r3,-8
blt cr6,5f
30: ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
31: ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
addi r3,r3,16
beq cr6,78f
1: or r7,r7,r6
32: ld r0,8(r4)
76: std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
33: ldu r9,16(r4)
or r12,r8,r12
77: stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
bdnz 1b
78: std r12,8(r3)
or r7,r7,r6
79: std r7,16(r3)
5: srd r12,r9,r11
or r12,r8,r12
80: std r12,24(r3)
bne 6f
li r3,0
blr
6: cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
blt cr1,.Ldo_tail
34: ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
b .Ldo_tail
.Ldst_unaligned:
mtcrf 0x01,r6 /* put #bytes to 8B bdry into cr7 */
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
bf cr7*4+3,1f
35: lbz r0,0(r4)
81: stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
36: lhzx r0,r7,r4
82: sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
37: lwzx r0,r7,r4
83: stwx r0,r7,r3
3: mtcrf 0x01,r5
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
bf cr7*4+0,1f
38: lwz r0,0(r4)
39: lwz r9,4(r4)
addi r4,r4,8
84: stw r0,0(r3)
85: stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
40: lwz r0,0(r4)
addi r4,r4,4
86: stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
41: lhz r0,0(r4)
addi r4,r4,2
87: sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
42: lbz r0,0(r4)
88: stb r0,0(r3)
4: li r3,0
blr
/*
* exception handlers follow
* we have to return the number of bytes not copied
* for an exception on a load, we set the rest of the destination to 0
*/
136:
137:
add r3,r3,r7
b 1f
130:
131:
addi r3,r3,8
120:
122:
124:
125:
126:
127:
128:
129:
133:
addi r3,r3,8
121:
132:
addi r3,r3,8
123:
134:
135:
138:
139:
140:
141:
142:
/*
* here we have had a fault on a load and r3 points to the first
* unmodified byte of the destination
*/
1: ld r6,-24(r1)
ld r4,-16(r1)
ld r5,-8(r1)
subf r6,r6,r3
add r4,r4,r6
subf r5,r6,r5 /* #bytes left to go */
/*
* first see if we can copy any more bytes before hitting another exception
*/
mtctr r5
43: lbz r0,0(r4)
addi r4,r4,1
89: stb r0,0(r3)
addi r3,r3,1
bdnz 43b
li r3,0 /* huh? all copied successfully this time? */
blr
/*
* here we have trapped again, need to clear ctr bytes starting at r3
*/
143: mfctr r5
li r0,0
mr r4,r3
mr r3,r5 /* return the number of bytes not copied */
1: andi. r9,r4,7
beq 3f
90: stb r0,0(r4)
addic. r5,r5,-1
addi r4,r4,1
bne 1b
blr
3: srdi r9,r5,3
andi. r5,r5,7
mtctr r9
91: std r0,0(r4)
addi r4,r4,8
bdnz 91b
beqlr
mtctr r5
92: stb r0,0(r4)
addi r4,r4,1
bdnz 92b
blr
/*
* exception handlers for stores: we just need to work
* out how many bytes weren't copied
*/
182:
183:
add r3,r3,r7
b 1f
180:
addi r3,r3,8
171:
177:
addi r3,r3,8
170:
172:
176:
178:
addi r3,r3,4
185:
addi r3,r3,4
173:
174:
175:
179:
181:
184:
186:
187:
188:
189:
1:
ld r6,-24(r1)
ld r5,-8(r1)
add r6,r6,r5
subf r3,r3,r6 /* #bytes not copied */
190:
191:
192:
blr /* #bytes not copied in r3 */
.section __ex_table,"a"
.align 3
.llong 20b,120b
.llong 21b,121b
.llong 70b,170b
.llong 22b,122b
.llong 71b,171b
.llong 72b,172b
.llong 23b,123b
.llong 73b,173b
.llong 74b,174b
.llong 75b,175b
.llong 24b,124b
.llong 25b,125b
.llong 26b,126b
.llong 27b,127b
.llong 28b,128b
.llong 29b,129b
.llong 30b,130b
.llong 31b,131b
.llong 32b,132b
.llong 76b,176b
.llong 33b,133b
.llong 77b,177b
.llong 78b,178b
.llong 79b,179b
.llong 80b,180b
.llong 34b,134b
.llong 35b,135b
.llong 81b,181b
.llong 36b,136b
.llong 82b,182b
.llong 37b,137b
.llong 83b,183b
.llong 38b,138b
.llong 39b,139b
.llong 84b,184b
.llong 85b,185b
.llong 40b,140b
.llong 86b,186b
.llong 41b,141b
.llong 87b,187b
.llong 42b,142b
.llong 88b,188b
.llong 43b,143b
.llong 89b,189b
.llong 90b,190b
.llong 91b,191b
.llong 92b,192b
.text
/*
* Routine to copy a whole page of data, optimized for POWER4.
* On POWER4 it is more than 50% faster than the simple loop
* above (following the .Ldst_aligned label) but it runs slightly
* slower on POWER3.
*/
.Lcopy_page:
std r31,-32(1)
std r30,-40(1)
std r29,-48(1)
std r28,-56(1)
std r27,-64(1)
std r26,-72(1)
std r25,-80(1)
std r24,-88(1)
std r23,-96(1)
std r22,-104(1)
std r21,-112(1)
std r20,-120(1)
li r5,4096/32 - 1
addi r3,r3,-8
li r0,5
0: addi r5,r5,-24
mtctr r0
20: ld r22,640(4)
21: ld r21,512(4)
22: ld r20,384(4)
23: ld r11,256(4)
24: ld r9,128(4)
25: ld r7,0(4)
26: ld r25,648(4)
27: ld r24,520(4)
28: ld r23,392(4)
29: ld r10,264(4)
30: ld r8,136(4)
31: ldu r6,8(4)
cmpwi r5,24
1:
32: std r22,648(3)
33: std r21,520(3)
34: std r20,392(3)
35: std r11,264(3)
36: std r9,136(3)
37: std r7,8(3)
38: ld r28,648(4)
39: ld r27,520(4)
40: ld r26,392(4)
41: ld r31,264(4)
42: ld r30,136(4)
43: ld r29,8(4)
44: std r25,656(3)
45: std r24,528(3)
46: std r23,400(3)
47: std r10,272(3)
48: std r8,144(3)
49: std r6,16(3)
50: ld r22,656(4)
51: ld r21,528(4)
52: ld r20,400(4)
53: ld r11,272(4)
54: ld r9,144(4)
55: ld r7,16(4)
56: std r28,664(3)
57: std r27,536(3)
58: std r26,408(3)
59: std r31,280(3)
60: std r30,152(3)
61: stdu r29,24(3)
62: ld r25,664(4)
63: ld r24,536(4)
64: ld r23,408(4)
65: ld r10,280(4)
66: ld r8,152(4)
67: ldu r6,24(4)
bdnz 1b
68: std r22,648(3)
69: std r21,520(3)
70: std r20,392(3)
71: std r11,264(3)
72: std r9,136(3)
73: std r7,8(3)
74: addi r4,r4,640
75: addi r3,r3,648
bge 0b
mtctr r5
76: ld r7,0(4)
77: ld r8,8(4)
78: ldu r9,16(4)
3:
79: ld r10,8(4)
80: std r7,8(3)
81: ld r7,16(4)
82: std r8,16(3)
83: ld r8,24(4)
84: std r9,24(3)
85: ldu r9,32(4)
86: stdu r10,32(3)
bdnz 3b
4:
87: ld r10,8(4)
88: std r7,8(3)
89: std r8,16(3)
90: std r9,24(3)
91: std r10,32(3)
9: ld r20,-120(1)
ld r21,-112(1)
ld r22,-104(1)
ld r23,-96(1)
ld r24,-88(1)
ld r25,-80(1)
ld r26,-72(1)
ld r27,-64(1)
ld r28,-56(1)
ld r29,-48(1)
ld r30,-40(1)
ld r31,-32(1)
li r3,0
blr
/*
* on an exception, reset to the beginning and jump back into the
* standard __copy_tofrom_user
*/
100: ld r3,-24(r1)
ld r4,-24(r1)
li r5,4096
b .Ldst_aligned
.section __ex_table,"a"
.align 3
.llong 20b,100b
.llong 21b,100b
.llong 22b,100b
.llong 23b,100b
.llong 24b,100b
.llong 25b,100b
.llong 26b,100b
.llong 27b,100b
.llong 28b,100b
.llong 29b,100b
.llong 30b,100b
.llong 31b,100b
.llong 32b,100b
.llong 33b,100b
.llong 34b,100b
.llong 35b,100b
.llong 36b,100b
.llong 37b,100b
.llong 38b,100b
.llong 39b,100b
.llong 40b,100b
.llong 41b,100b
.llong 42b,100b
.llong 43b,100b
.llong 44b,100b
.llong 45b,100b
.llong 46b,100b
.llong 47b,100b
.llong 48b,100b
.llong 49b,100b
.llong 50b,100b
.llong 51b,100b
.llong 52b,100b
.llong 53b,100b
.llong 54b,100b
.llong 55b,100b
.llong 56b,100b
.llong 57b,100b
.llong 58b,100b
.llong 59b,100b
.llong 60b,100b
.llong 61b,100b
.llong 62b,100b
.llong 63b,100b
.llong 64b,100b
.llong 65b,100b
.llong 66b,100b
.llong 67b,100b
.llong 68b,100b
.llong 69b,100b
.llong 70b,100b
.llong 71b,100b
.llong 72b,100b
.llong 73b,100b
.llong 74b,100b
.llong 75b,100b
.llong 76b,100b
.llong 77b,100b
.llong 78b,100b
.llong 79b,100b
.llong 80b,100b
.llong 81b,100b
.llong 82b,100b
.llong 83b,100b
.llong 84b,100b
.llong 85b,100b
.llong 86b,100b
.llong 87b,100b
.llong 88b,100b
.llong 89b,100b
.llong 90b,100b
.llong 91b,100b
/*
* arch/ppc64/lib/memcpy.S
*
* Copyright (C) 2002 Paul Mackerras, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <asm/processor.h>
#include "../kernel/ppc_asm.h"
.align 7
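/*
 * memcpy(dest r3, src r4, nb r5): kernel-to-kernel copy using the same
 * alignment-handling scheme as __copy_tofrom_user, minus the exception tables.
 */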
_GLOBAL(memcpy)
mtcrf 0x01,r5
cmpldi cr1,r5,16
neg r6,r3 # LS 3 bits = # bytes to 8-byte dest bdry
andi. r6,r6,7
dcbt 0,r4
blt cr1,.Lshort_copy
bne .Ldst_unaligned
.Ldst_aligned:
andi. r0,r4,7
addi r3,r3,-16
bne .Lsrc_unaligned
srdi r7,r5,4
ld r9,0(r4)
addi r4,r4,-8
mtctr r7
andi. r5,r5,7
bf cr7*4+0,2f
addi r3,r3,8
addi r4,r4,8
mr r8,r9
blt cr1,3f
1: ld r9,8(r4)
std r8,8(r3)
2: ldu r8,16(r4)
stdu r9,16(r3)
bdnz 1b
3: std r8,8(r3)
beqlr
addi r3,r3,16
ld r9,8(r4)
.Ldo_tail:
bf cr7*4+1,1f
rotldi r9,r9,32
stw r9,0(r3)
addi r3,r3,4
1: bf cr7*4+2,2f
rotldi r9,r9,16
sth r9,0(r3)
addi r3,r3,2
2: bf cr7*4+3,3f
rotldi r9,r9,8
stb r9,0(r3)
3: blr
.Lsrc_unaligned:
srdi r6,r5,3
addi r5,r5,-16
subf r4,r0,r4
srdi r7,r5,4
sldi r10,r0,3
cmpdi cr6,r6,3
andi. r5,r5,7
mtctr r7
subfic r11,r10,64
add r5,r5,r0
bt cr7*4+0,0f
ld r9,0(r4) # 3+2n loads, 2+2n stores
ld r0,8(r4)
sld r6,r9,r10
ldu r9,16(r4)
srd r7,r0,r11
sld r8,r0,r10
or r7,r7,r6
blt cr6,4f
ld r0,8(r4)
# s1<< in r8, d0=(s0<<|s1>>) in r7, s3 in r0, s2 in r9, nix in r6 & r12
b 2f
0: ld r0,0(r4) # 4+2n loads, 3+2n stores
ldu r9,8(r4)
sld r8,r0,r10
addi r3,r3,-8
blt cr6,5f
ld r0,8(r4)
srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
srd r7,r0,r11
sld r8,r0,r10
addi r3,r3,16
beq cr6,3f
# d0=(s0<<|s1>>) in r12, s1<< in r6, s2>> in r7, s2<< in r8, s3 in r9
1: or r7,r7,r6
ld r0,8(r4)
std r12,8(r3)
2: srd r12,r9,r11
sld r6,r9,r10
ldu r9,16(r4)
or r12,r8,r12
stdu r7,16(r3)
srd r7,r0,r11
sld r8,r0,r10
bdnz 1b
3: std r12,8(r3)
or r7,r7,r6
4: std r7,16(r3)
5: srd r12,r9,r11
or r12,r8,r12
std r12,24(r3)
beqlr
cmpwi cr1,r5,8
addi r3,r3,32
sld r9,r9,r10
blt cr1,.Ldo_tail
ld r0,8(r4)
srd r7,r0,r11
or r9,r7,r9
b .Ldo_tail
.Ldst_unaligned:
mtcrf 0x01,r6 # put #bytes to 8B bdry into cr7
subf r5,r6,r5
li r7,0
cmpldi cr1,r5,16
bf cr7*4+3,1f
lbz r0,0(r4)
stb r0,0(r3)
addi r7,r7,1
1: bf cr7*4+2,2f
lhzx r0,r7,r4
sthx r0,r7,r3
addi r7,r7,2
2: bf cr7*4+1,3f
lwzx r0,r7,r4
stwx r0,r7,r3
3: mtcrf 0x01,r5
add r4,r6,r4
add r3,r6,r3
b .Ldst_aligned
.Lshort_copy:
bf cr7*4+0,1f
lwz r0,0(r4)
lwz r9,4(r4)
addi r4,r4,8
stw r0,0(r3)
stw r9,4(r3)
addi r3,r3,8
1: bf cr7*4+1,2f
lwz r0,0(r4)
addi r4,r4,4
stw r0,0(r3)
addi r3,r3,4
2: bf cr7*4+2,3f
lhz r0,0(r4)
addi r4,r4,2
sth r0,0(r3)
addi r3,r3,2
3: bf cr7*4+3,4f
lbz r0,0(r4)
stb r0,0(r3)
4: blr
......@@ -12,61 +12,6 @@
#include <asm/processor.h>
#include <asm/errno.h>
#define CACHE_LINE_SIZE 128
#define LG_CACHE_LINE_SIZE 7
#define MAX_COPY_PREFETCH 1
#define COPY_16_BYTES \
lwz r7,4(r4); \
lwz r8,8(r4); \
lwz r9,12(r4); \
lwzu r10,16(r4); \
stw r7,4(r6); \
stw r8,8(r6); \
stw r9,12(r6); \
stwu r10,16(r6)
#define COPY_16_BYTES_WITHEX(n) \
8 ## n ## 0: \
lwz r7,4(r4); \
8 ## n ## 1: \
lwz r8,8(r4); \
8 ## n ## 2: \
lwz r9,12(r4); \
8 ## n ## 3: \
lwzu r10,16(r4); \
8 ## n ## 4: \
stw r7,4(r6); \
8 ## n ## 5: \
stw r8,8(r6); \
8 ## n ## 6: \
stw r9,12(r6); \
8 ## n ## 7: \
stwu r10,16(r6)
#define COPY_16_BYTES_EXCODE(n) \
9 ## n ## 0: \
addi r5,r5,-(16 * n); \
b 104f; \
9 ## n ## 1: \
addi r5,r5,-(16 * n); \
b 105f; \
.section __ex_table,"a"; \
.align 3; \
.llong 8 ## n ## 0b,9 ## n ## 0b; \
.llong 8 ## n ## 1b,9 ## n ## 0b; \
.llong 8 ## n ## 2b,9 ## n ## 0b; \
.llong 8 ## n ## 3b,9 ## n ## 0b; \
.llong 8 ## n ## 4b,9 ## n ## 1b; \
.llong 8 ## n ## 5b,9 ## n ## 1b; \
.llong 8 ## n ## 6b,9 ## n ## 1b; \
.llong 8 ## n ## 7b,9 ## n ## 1b; \
.text
CACHELINE_BYTES = CACHE_LINE_SIZE
LG_CACHELINE_BYTES = LG_CACHE_LINE_SIZE
CACHELINE_MASK = (CACHE_LINE_SIZE-1)
_GLOBAL(strcpy)
addi r5,r3,-1
addi r4,r4,-1
......@@ -148,48 +93,7 @@ _GLOBAL(memset)
_GLOBAL(memmove)
cmplw 0,r3,r4
bgt .backwards_memcpy
/* fall through */
_GLOBAL(memcpy)
srwi. r7,r5,3
addi r6,r3,-4
addi r4,r4,-4
beq 2f /* if less than 8 bytes to do */
andi. r0,r6,3 /* get dest word aligned */
mtctr r7
bne 5f
1: lwz r7,4(r4)
lwzu r8,8(r4)
stw r7,4(r6)
stwu r8,8(r6)
bdnz 1b
andi. r5,r5,7
2: cmplwi 0,r5,4
blt 3f
lwzu r0,4(r4)
addi r5,r5,-4
stwu r0,4(r6)
3: cmpwi 0,r5,0
beqlr
mtctr r5
addi r4,r4,3
addi r6,r6,3
4: lbzu r0,1(r4)
stbu r0,1(r6)
bdnz 4b
blr
5: subfic r0,r0,4
mtctr r0
6: lbz r7,4(r4)
addi r4,r4,1
stb r7,4(r6)
addi r6,r6,1
bdnz 6b
subf r5,r0,r5
rlwinm. r7,r5,32-3,3,31
beq 2b
mtctr r7
b 1b
b .memcpy
_GLOBAL(backwards_memcpy)
rlwinm. r7,r5,32-3,3,31 /* r0 = r5 >> 3 */
......@@ -253,195 +157,6 @@ _GLOBAL(memchr)
2: li r3,0
blr
_GLOBAL(__copy_tofrom_user)
addi r4,r4,-4
addi r6,r3,-4
neg r0,r3
andi. r0,r0,CACHELINE_MASK /* # bytes to start of cache line */
beq 58f
cmplw 0,r5,r0 /* is this more than total to do? */
blt 63f /* if not much to do */
andi. r8,r0,3 /* get it word-aligned first */
mtctr r8
beq+ 61f
70: lbz r9,4(r4) /* do some bytes */
71: stb r9,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 70b
61: subf r5,r0,r5
srwi. r0,r0,2
mtctr r0
beq 58f
72: lwzu r9,4(r4) /* do some words */
73: stwu r9,4(r6)
bdnz 72b
58: srwi. r0,r5,LG_CACHELINE_BYTES /* # complete cachelines */
clrlwi r5,r5,32-LG_CACHELINE_BYTES
li r11,4
beq 63f
/* Here we decide how far ahead to prefetch the source */
#if MAX_COPY_PREFETCH > 1
/* Heuristically, for large transfers we prefetch
MAX_COPY_PREFETCH cachelines ahead. For small transfers
we prefetch 1 cacheline ahead. */
cmpwi r0,MAX_COPY_PREFETCH
li r7,1
li r3,4
ble 111f
li r7,MAX_COPY_PREFETCH
111: mtctr r7
112: dcbt r3,r4
addi r3,r3,CACHELINE_BYTES
bdnz 112b
#else /* MAX_COPY_PREFETCH == 1 */
li r3,CACHELINE_BYTES + 4
dcbt r11,r4
#endif /* MAX_COPY_PREFETCH */
mtctr r0
53:
dcbt r3,r4
dcbz r11,r6
/* had to move these to keep extable in order */
.section __ex_table,"a"
.align 3
.llong 70b,100f
.llong 71b,101f
.llong 72b,102f
.llong 73b,103f
.llong 53b,105f
.text
/* the main body of the cacheline loop */
COPY_16_BYTES_WITHEX(0)
#if CACHE_LINE_SIZE >= 32
COPY_16_BYTES_WITHEX(1)
#if CACHE_LINE_SIZE >= 64
COPY_16_BYTES_WITHEX(2)
COPY_16_BYTES_WITHEX(3)
#if CACHE_LINE_SIZE >= 128
COPY_16_BYTES_WITHEX(4)
COPY_16_BYTES_WITHEX(5)
COPY_16_BYTES_WITHEX(6)
COPY_16_BYTES_WITHEX(7)
#endif
#endif
#endif
bdnz 53b
63: srwi. r0,r5,2
mtctr r0
beq 64f
30: lwzu r0,4(r4)
31: stwu r0,4(r6)
bdnz 30b
64: andi. r0,r5,3
mtctr r0
beq+ 65f
40: lbz r0,4(r4)
41: stb r0,4(r6)
addi r4,r4,1
addi r6,r6,1
bdnz 40b
65: li r3,0
blr
/* read fault, initial single-byte copy */
100: li r4,0
b 90f
/* write fault, initial single-byte copy */
101: li r4,1
90: subf r5,r8,r5
li r3,0
b 99f
/* read fault, initial word copy */
102: li r4,0
b 91f
/* write fault, initial word copy */
103: li r4,1
91: li r3,2
b 99f
/*
* this stuff handles faults in the cacheline loop and branches to either
* 104f (if in read part) or 105f (if in write part), after updating r5
*/
COPY_16_BYTES_EXCODE(0)
#if CACHE_LINE_SIZE >= 32
COPY_16_BYTES_EXCODE(1)
#if CACHE_LINE_SIZE >= 64
COPY_16_BYTES_EXCODE(2)
COPY_16_BYTES_EXCODE(3)
#if CACHE_LINE_SIZE >= 128
COPY_16_BYTES_EXCODE(4)
COPY_16_BYTES_EXCODE(5)
COPY_16_BYTES_EXCODE(6)
COPY_16_BYTES_EXCODE(7)
#endif
#endif
#endif
/* read fault in cacheline loop */
104: li r4,0
b 92f
/* fault on dcbz (effectively a write fault) */
/* or write fault in cacheline loop */
105: li r4,1
92: li r3,LG_CACHELINE_BYTES
b 99f
/* read fault in final word loop */
108: li r4,0
b 93f
/* write fault in final word loop */
109: li r4,1
93: andi. r5,r5,3
li r3,2
b 99f
/* read fault in final byte loop */
110: li r4,0
b 94f
/* write fault in final byte loop */
111: li r4,1
94: li r5,0
li r3,0
/*
* At this stage the number of bytes not copied is
* r5 + (ctr << r3), and r4 is 0 for read or 1 for write.
*/
99: mfctr r0
slw r3,r0,r3
add r3,r3,r5
cmpwi 0,r4,0
bne 120f
/* for read fault, clear out the destination: r3 bytes starting at 4(r6) */
srwi. r0,r3,2
li r9,0
mtctr r0
beq 113f
112: stwu r9,4(r6)
bdnz 112b
113: andi. r0,r3,3
mtctr r0
beq 120f
114: stb r9,4(r6)
addi r6,r6,1
bdnz 114b
120: blr
.section __ex_table,"a"
.align 3
.llong 30b,108b
.llong 31b,109b
.llong 40b,110b
.llong 41b,111b
.llong 112b,120b
.llong 114b,120b
.text
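The fault labels above leave the log2 of the copy unit still in flight in r3 (0 for the byte loops, 2 for the word loops, LG_CACHELINE_BYTES for the cacheline loop), so the count of bytes not copied at label 99 is r5 + (ctr << r3). A minimal C sketch of that arithmetic, with invented names, assuming nothing beyond the comment above:

/* Sketch only: models the return-value computation at label 99.
 * 'remainder', 'ctr' and 'unit_shift' stand in for r5, the CTR register
 * and r3; none of these names exist in the kernel source. */
static unsigned long bytes_not_copied(unsigned long remainder,
				      unsigned long ctr,
				      unsigned int unit_shift)
{
	return remainder + (ctr << unit_shift);
}

For example, a fault with three 128-byte cache lines still queued in CTR and 40 trailing bytes pending yields 3*128 + 40 = 424 uncopied bytes.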
_GLOBAL(__clear_user)
addi r6,r3,-4
li r3,0
......
......@@ -38,16 +38,11 @@
#include <asm/ppcdebug.h>
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
extern void (*debugger)(struct pt_regs *);
extern void (*debugger_fault_handler)(struct pt_regs *);
extern int (*debugger_dabr_match)(struct pt_regs *);
#ifdef CONFIG_DEBUG_KERNEL
int debugger_kernel_faults = 1;
#endif
extern void die_if_kernel(char *, struct pt_regs *, long);
void bad_page_fault(struct pt_regs *, unsigned long);
void do_page_fault(struct pt_regs *, unsigned long, unsigned long);
void bad_page_fault(struct pt_regs *, unsigned long, int);
/*
* For 600- and 800-family processors, the error_code parameter is DSISR
......@@ -71,7 +66,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if (regs->trap == 0x400)
error_code &= 0x48200000;
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
#ifdef CONFIG_DEBUG_KERNEL
if (debugger_fault_handler && (regs->trap == 0x300 ||
regs->trap == 0x380)) {
debugger_fault_handler(regs);
......@@ -83,10 +78,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
if (debugger_dabr_match(regs))
return;
}
#endif /* CONFIG_XMON || CONFIG_KGDB */
#endif
if (in_interrupt() || mm == NULL) {
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGSEGV);
return;
}
down_read(&mm->mmap_sem);
......@@ -159,7 +154,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
return;
}
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGSEGV);
return;
/*
......@@ -176,7 +171,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
printk("VM: killing process %s\n", current->comm);
if (user_mode(regs))
do_exit(SIGKILL);
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGKILL);
return;
do_sigbus:
......@@ -187,7 +182,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
info.si_addr = (void *)address;
force_sig_info (SIGBUS, &info, current);
if (!user_mode(regs))
bad_page_fault(regs, address);
bad_page_fault(regs, address, SIGBUS);
}
/*
......@@ -196,8 +191,10 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
* in traps.c.
*/
void
bad_page_fault(struct pt_regs *regs, unsigned long address)
bad_page_fault(struct pt_regs *regs, unsigned long address, int sig)
{
extern void die(const char *, struct pt_regs *, long);
unsigned long fixup;
/* Are we prepared to handle this fault? */
......@@ -207,13 +204,9 @@ bad_page_fault(struct pt_regs *regs, unsigned long address)
}
/* kernel has accessed a bad area */
show_regs(regs);
#if defined(CONFIG_XMON) || defined(CONFIG_KGDB)
#ifdef CONFIG_DEBUG_KERNEL
if (debugger_kernel_faults)
debugger(regs);
#endif
print_backtrace( (unsigned long *)regs->gpr[1] );
panic("kernel access of bad area pc %lx lr %lx address %lX tsk %s/%d",
regs->nip,regs->link,address,current->comm,current->pid);
die("Kernel access of bad area", regs, sig);
}
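The debugger hooks consulted above only fire once something has filled in the debugger_* pointers; a hedged sketch of how xmon might wire itself into this fault path (the registration function name is an assumption, not code from this commit):

/* Sketch only: xmon_hook_faults() is an invented name. */
static void xmon_fault_handler_sketch(struct pt_regs *regs)
{
	xmon(regs);
}

void xmon_hook_faults(void)
{
	debugger = xmon;
	debugger_fault_handler = xmon_fault_handler_sketch;
	debugger_dabr_match = xmon_dabr_match;
}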
......@@ -85,14 +85,10 @@ extern struct task_struct *current_set[NR_CPUS];
void mm_init_ppc64(void);
unsigned long *pmac_find_end_of_memory(void);
extern unsigned long *find_end_of_memory(void);
extern pgd_t ioremap_dir[];
pgd_t * ioremap_pgd = (pgd_t *)&ioremap_dir;
static void map_io_page(unsigned long va, unsigned long pa, int flags);
extern void die_if_kernel(char *,struct pt_regs *,long);
unsigned long klimit = (unsigned long)_end;
......@@ -246,20 +242,17 @@ static void map_io_page(unsigned long ea, unsigned long pa, int flags)
void
flush_tlb_mm(struct mm_struct *mm)
{
if (mm->map_count) {
struct vm_area_struct *mp;
spin_lock(&mm->page_table_lock);
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
__flush_tlb_range(mm, mp->vm_start, mp->vm_end);
} else {
/* MIKEC: It is not clear why this is needed */
/* paulus: it is needed to clear out stale HPTEs
* when an address space (represented by an mm_struct)
* is being destroyed. */
__flush_tlb_range(mm, USER_START, USER_END);
}
/* XXX are there races with checking cpu_vm_mask? - Anton */
mm->cpu_vm_mask = 0;
spin_unlock(&mm->page_table_lock);
}
/*
......@@ -399,47 +392,40 @@ __flush_tlb_range(struct mm_struct *mm, unsigned long start, unsigned long end)
flush_hash_range(context, i, local);
}
void __init free_initmem(void)
void free_initmem(void)
{
unsigned long a;
unsigned long num_freed_pages = 0;
#define FREESEC(START,END,CNT) do { \
a = (unsigned long)(&START); \
for (; a < (unsigned long)(&END); a += PAGE_SIZE) { \
clear_bit(PG_reserved, &mem_map[MAP_NR(a)].flags); \
set_page_count(mem_map+MAP_NR(a), 1); \
free_page(a); \
CNT++; \
} \
} while (0)
FREESEC(__init_begin,__init_end,num_freed_pages);
printk ("Freeing unused kernel memory: %ldk init\n",
PGTOKB(num_freed_pages));
unsigned long addr;
addr = (unsigned long)(&__init_begin);
for (; addr < (unsigned long)(&__init_end); addr += PAGE_SIZE) {
ClearPageReserved(virt_to_page(addr));
set_page_count(virt_to_page(addr), 1);
free_page(addr);
totalram_pages++;
}
printk ("Freeing unused kernel memory: %dk freed\n",
(&__init_end - &__init_begin) >> 10);
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
unsigned long xstart = start;
if (start < end)
printk ("Freeing initrd memory: %ldk freed\n", (end - start) >> 10);
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(mem_map + MAP_NR(start));
set_page_count(mem_map+MAP_NR(start), 1);
ClearPageReserved(virt_to_page(start));
set_page_count(virt_to_page(start), 1);
free_page(start);
totalram_pages++;
}
printk ("Freeing initrd memory: %ldk freed\n", (end - xstart) >> 10);
}
#endif
/*
* Do very early mm setup.
*/
void __init mm_init_ppc64(void) {
void __init mm_init_ppc64(void)
{
struct paca_struct *lpaca;
unsigned long guard_page, index;
......@@ -467,8 +453,6 @@ void __init mm_init_ppc64(void) {
ppc_md.progress("MM:exit", 0x211);
}
/*
* Initialize the bootmem system and give it all the memory we
* have available.
......@@ -582,11 +566,11 @@ void __init mem_init(void)
for (addr = (unsigned long)sysmap;
addr < PAGE_ALIGN((unsigned long)sysmap+sysmap_size) ;
addr += PAGE_SIZE)
SetPageReserved(mem_map + MAP_NR(addr));
SetPageReserved(virt_to_page(addr));
for (addr = KERNELBASE; addr <= (unsigned long)__va(lmb_end_of_DRAM());
addr += PAGE_SIZE) {
if (!PageReserved(mem_map + MAP_NR(addr)))
if (!PageReserved(virt_to_page(addr)))
continue;
if (addr < (ulong) etext)
codepages++;
......@@ -665,6 +649,8 @@ int __hash_page(unsigned long ea, unsigned long access, unsigned long vsid,
* fault has been handled by updating a PTE in the linux page tables.
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
* This must always be called with the mm->page_table_lock held
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long ea,
pte_t pte)
......
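Since update_mmu_cache() now documents that it must run with mm->page_table_lock held, a hedged sketch of a conforming caller (the helper and its arguments are illustrative, not part of this change):

/* Sketch only: generic_set_pte_sketch() does not exist in the tree. */
static void generic_set_pte_sketch(struct vm_area_struct *vma,
				   unsigned long ea, pte_t *ptep, pte_t pte)
{
	spin_lock(&vma->vm_mm->page_table_lock);
	set_pte(ptep, pte);
	update_mmu_cache(vma, ea, pte);
	spin_unlock(&vma->vm_mm->page_table_lock);
}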
......@@ -309,7 +309,6 @@ xmon(struct pt_regs *excp)
std 29,232(%0)\n\
std 30,240(%0)\n\
std 31,248(%0)" : : "b" (&regs));
printf("xmon called\n");
/* Fetch the link reg for this stack frame.
NOTE: the prev printf fills in the lr. */
regs.nip = regs.link = ((unsigned long *)(regs.gpr[1]))[2];
......
......@@ -26,7 +26,7 @@
#define O_DIRECTORY 040000 /* must be a directory */
#define O_NOFOLLOW 0100000 /* don't follow links */
#define O_LARGEFILE 0200000
#define O_DIRECT 0400000 /* direct disk access hint - currently ignored */
#define O_DIRECT 0400000 /* direct disk access hint */
#define F_DUPFD 0 /* dup */
#define F_GETFD 1 /* get close_on_exec */
......
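O_DIRECT remains only a hint to the kernel; a minimal user-space sketch of requesting it (the path is an example, and the buffer-alignment requirements that usually accompany O_DIRECT are not shown):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/sda", O_RDONLY | O_DIRECT);	/* example path only */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	close(fd);
	return 0;
}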
......@@ -26,7 +26,7 @@ typedef struct {
#endif
unsigned int __local_bh_count;
unsigned int __syscall_count;
unsigned long __unused;
unsigned long idle_timestamp;
struct task_struct * __ksoftirqd_task;
} ____cacheline_aligned irq_cpustat_t;
......
......@@ -50,7 +50,7 @@ static inline void isync(void)
#define HMT_LOW "\tor 1,1,1 # low priority\n"
#define HMT_MEDIUM "\tor 2,2,2 # medium priority\n"
#define HMT_MEDIUM "\tor 3,3,3 # high priority\n"
#define HMT_HIGH "\tor 3,3,3 # high priority\n"
#else
#define HMT_low() do { } while(0)
#define HMT_medium() do { } while(0)
......
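The HMT_* strings are meant to be dropped into inline assembly so a hardware-multithreaded CPU lowers its thread priority while it spins; a hedged sketch of that pattern (the flag being polled is hypothetical):

/* Sketch only: drop SMT priority while polling, restore it afterwards. */
static void wait_for_flag(volatile int *flag)
{
	while (!*flag)
		__asm__ __volatile__(HMT_LOW);
	__asm__ __volatile__(HMT_MEDIUM);
}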
......@@ -211,18 +211,11 @@ static inline void _tlbie(unsigned long va, int large)
asm volatile("eieio; tlbsync; ptesync": : :"memory");
}
static inline void _tlbiel(unsigned long va, int large)
static inline void _tlbiel(unsigned long va)
{
asm volatile("ptesync": : :"memory");
if (large) {
asm volatile("clrldi %0,%0,16\n\
tlbiel %0,1" : : "r"(va) : "memory");
} else {
asm volatile("clrldi %0,%0,16\n\
tlbiel %0,0" : : "r"(va) : "memory");
}
tlbiel %0" : : "r"(va) : "memory");
asm volatile("ptesync": : :"memory");
}
......
......@@ -85,7 +85,8 @@ struct paca_struct {
u8 xProcEnabled; /* 1=soft enabled 0x78 */
u8 xHrdIntCount; /* Count of active hardware interrupts 0x79 */
u8 prof_enabled; /* 1=iSeries profiling enabled 0x7A */
u8 resv1[5]; /* 0x7B-0x7F */
u8 stab_cache_pointer;
u8 resv1[4]; /* 0x7B-0x7F */
/*=====================================================================================
* CACHE_LINE_2 0x0080 - 0x00FF
......
......@@ -215,11 +215,12 @@ static inline int get_order(unsigned long size)
#define __a2p(x) ((void *) absolute_to_phys(x))
#define __a2v(x) ((void *) __va(absolute_to_phys(x)))
#define virt_to_page(kaddr) (mem_map+(__pa((unsigned long)kaddr) >> PAGE_SHIFT))
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define VALID_PAGE(page) ((page - mem_map) < max_mapnr)
#define MAP_NR(addr) (__pa(addr) >> PAGE_SHIFT)
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
......
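With these helpers, virt_to_page() is simply __pa() composed with pfn_to_page(), and pfn_valid() bounds-checks the pfn against max_mapnr. A small sketch of how they chain (the function is illustrative only):

/* Sketch only: resolve a kernel virtual address to its struct page,
 * refusing pfns outside mem_map. */
static struct page *kaddr_to_page_checked(unsigned long kaddr)
{
	unsigned long pfn = __pa(kaddr) >> PAGE_SHIFT;

	if (!pfn_valid(pfn))
		return NULL;
	return pfn_to_page(pfn);
}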
......@@ -155,7 +155,7 @@
* for zero-mapped memory areas etc..
*/
extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
#define ZERO_PAGE(vaddr) (mem_map + MAP_NR(empty_zero_page))
#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
#endif /* __ASSEMBLY__ */
/* shift to put page number into pte */
......@@ -167,22 +167,14 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
* Conversion functions: convert a page and protection to a page entry,
* and a page entry and page directory to the page they refer to.
*
* mk_pte_phys takes a physical address as input
*
* mk_pte takes a (struct page *) as input
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
#define mk_pte_phys(physpage,pgprot) \
({ \
pte_t pte; \
pte_val(pte) = (((physpage)<<(PTE_SHIFT-PAGE_SHIFT)) | pgprot_val(pgprot)); \
pte; \
})
#define mk_pte(page,pgprot) \
#define pfn_pte(pfn,pgprot) \
({ \
pte_t pte; \
pte_val(pte) = ((unsigned long)((page) - mem_map) << PTE_SHIFT) | \
pte_val(pte) = ((unsigned long)(pfn) << PTE_SHIFT) | \
pgprot_val(pgprot); \
pte; \
})
......@@ -195,8 +187,8 @@ extern unsigned long empty_zero_page[PAGE_SIZE/sizeof(unsigned long)];
/* pte_clear moved to later in this file */
#define pte_pagenr(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) (mem_map+pte_pagenr(x))
#define pte_pfn(x) ((unsigned long)((pte_val(x) >> PTE_SHIFT)))
#define pte_page(x) pfn_to_page(pte_pfn(x))
#define pmd_set(pmdp, ptep) (pmd_val(*(pmdp)) = (__ba_to_bpn(ptep)))
#define pmd_none(pmd) (!pmd_val(pmd))
......
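mk_pte() is now expressed entirely through the pfn interface: it converts the struct page to a pfn and hands it to pfn_pte(), while pte_page() makes the reverse trip through pte_pfn(). A hedged round-trip sketch (the checking function is invented):

/* Sketch only: a page should survive the pte round trip unchanged. */
static void pte_roundtrip_check(struct page *page, pgprot_t prot)
{
	pte_t pte = mk_pte(page, prot);

	BUG_ON(pte_pfn(pte) != page_to_pfn(page));
	BUG_ON(pte_page(pte) != page);
}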
......@@ -97,7 +97,7 @@
#define FPSCR_VX 0x20000000 /* Invalid operation summary */
#define FPSCR_OX 0x10000000 /* Overflow exception summary */
#define FPSCR_UX 0x08000000 /* Underflow exception summary */
#define FPSCR_ZX 0x04000000 /* Zero-devide exception summary */
#define FPSCR_ZX 0x04000000 /* Zero-divide exception summary */
#define FPSCR_XX 0x02000000 /* Inexact exception summary */
#define FPSCR_VXSNAN 0x01000000 /* Invalid op for SNaN */
#define FPSCR_VXISI 0x00800000 /* Invalid op for Inv - Inv */
......@@ -651,9 +651,7 @@ struct thread_struct {
unsigned long ksp; /* Kernel stack pointer */
struct pt_regs *regs; /* Pointer to saved register state */
mm_segment_t fs; /* for get_fs() validation */
signed long last_syscall;
double fpr[32]; /* Complete floating point set */
unsigned long fpscr_pad; /* fpr ... fpscr must be contiguous */
unsigned long fpscr; /* Floating point status */
};
......@@ -663,8 +661,8 @@ struct thread_struct {
INIT_SP, /* ksp */ \
(struct pt_regs *)INIT_SP - 1, /* regs */ \
KERNEL_DS, /*fs*/ \
0, /* last_syscall */ \
{0}, 0, 0 \
{0}, /* fpr */ \
0 /* fpscr */ \
}
/*
......
......@@ -55,9 +55,31 @@
#define smp_wmb() __asm__ __volatile__("": : :"memory")
#endif /* CONFIG_SMP */
#ifdef CONFIG_DEBUG_KERNEL
extern void (*debugger)(struct pt_regs *regs);
extern int (*debugger_bpt)(struct pt_regs *regs);
extern int (*debugger_sstep)(struct pt_regs *regs);
extern int (*debugger_iabr_match)(struct pt_regs *regs);
extern int (*debugger_dabr_match)(struct pt_regs *regs);
extern void (*debugger_fault_handler)(struct pt_regs *regs);
#else
#define debugger(regs) do { } while (0)
#define debugger_bpt(regs) 0
#define debugger_sstep(regs) 0
#define debugger_iabr_match(regs) 0
#define debugger_dabr_match(regs) 0
#define debugger_fault_handler ((void (*)(struct pt_regs *))0)
#endif
#ifdef CONFIG_XMON
extern void xmon_irq(int, void *, struct pt_regs *);
extern void xmon(struct pt_regs *excp);
extern void xmon(struct pt_regs *regs);
extern int xmon_bpt(struct pt_regs *regs);
extern int xmon_sstep(struct pt_regs *regs);
extern int xmon_iabr_match(struct pt_regs *regs);
extern int xmon_dabr_match(struct pt_regs *regs);
extern void (*xmon_fault_handler)(struct pt_regs *regs);
#endif
extern void print_backtrace(unsigned long *);
......
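When CONFIG_DEBUG_KERNEL is not set, the hooks above collapse to constant stubs, so call sites need no #ifdef of their own; a hedged sketch of such a call site (the surrounding function is hypothetical):

/* Sketch only: with CONFIG_DEBUG_KERNEL unset, debugger_bpt(regs)
 * expands to 0 and the branch is compiled away. */
static int handle_breakpoint_sketch(struct pt_regs *regs)
{
	if (debugger_bpt(regs))
		return 1;	/* debugger consumed the trap */
	return 0;		/* deliver the trap normally */
}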
......@@ -65,6 +65,8 @@ static inline struct thread_info *current_thread_info(void)
#endif /* __ASSEMBLY__ */
#define PREEMPT_ACTIVE 0x4000000
/*
* thread information flag bit numbers
*/
......
......@@ -18,7 +18,6 @@
<< (SHIFT_SCALE-SHIFT_HZ)) / HZ)
typedef unsigned long cycles_t;
extern cycles_t cacheflush_time;
static inline cycles_t get_cycles(void)
{
......
......@@ -230,6 +230,9 @@
#define __NR_lremovexattr 219
#define __NR_fremovexattr 220
#define __NR_futex 221
#define __NR_tux 222
#define __NR_sched_setaffinity 223
#define __NR_sched_getaffinity 224
#if 0
/* Remind paulus to add these into ppc32 */
......
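The hunk above only reserves the new syscall numbers; a hedged user-space sketch of invoking one of them directly (the argument layout is assumed from later kernels, not taken from this change):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	unsigned long mask = 0;
	/* 224 is __NR_sched_getaffinity above; pid 0 means the caller. */
	long ret = syscall(224, 0, sizeof(mask), &mask);

	if (ret < 0) {
		perror("sched_getaffinity");
		return 1;
	}
	printf("affinity mask: 0x%lx\n", mask);
	return 0;
}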