nexedi / linux

Commit 0174f72f authored Nov 14, 2005 by Linus Torvalds

Merge git://git.kernel.org/pub/scm/linux/kernel/git/paulus/powerpc-merge

Parents: 302fe175 ba76cd57

Showing 66 changed files with 868 additions and 10738 deletions (+868 -10738)
arch/powerpc/Kconfig  +3 -2
arch/powerpc/Makefile  +1 -1
arch/powerpc/configs/pseries_defconfig  +131 -75
arch/powerpc/kernel/Makefile  +14 -6
arch/powerpc/kernel/asm-offsets.c  +4 -2
arch/powerpc/kernel/dma_64.c  +0 -0
arch/powerpc/kernel/iomap.c  +0 -0
arch/powerpc/kernel/iommu.c  +0 -0
arch/powerpc/kernel/irq.c  +5 -4
arch/powerpc/kernel/kprobes.c  +0 -0
arch/powerpc/kernel/lparcfg.c  +25 -26
arch/powerpc/kernel/machine_kexec_64.c  +57 -6
arch/powerpc/kernel/module_64.c  +0 -0
arch/powerpc/kernel/pci_64.c  +0 -0
arch/powerpc/kernel/pci_direct_iommu.c  +0 -0
arch/powerpc/kernel/pci_dn.c  +0 -0
arch/powerpc/kernel/pci_iommu.c  +0 -0
arch/powerpc/kernel/prom.c  +2 -0
arch/powerpc/kernel/rtas-rtc.c  +105 -0
arch/powerpc/kernel/setup_32.c  +0 -4
arch/powerpc/kernel/setup_64.c  +5 -0
arch/powerpc/kernel/signal_32.c  +5 -2
arch/powerpc/kernel/signal_64.c  +3 -3
arch/powerpc/kernel/vdso32/datapage.S  +2 -1
arch/powerpc/kernel/vdso32/gettimeofday.S  +8 -4
arch/powerpc/kernel/vdso64/datapage.S  +1 -0
arch/powerpc/kernel/vdso64/gettimeofday.S  +19 -12
arch/powerpc/platforms/iseries/irq.c  +3 -22
arch/powerpc/platforms/iseries/setup.c  +0 -6
arch/powerpc/platforms/powermac/time.c  +5 -4
arch/powerpc/platforms/pseries/Makefile  +4 -1
arch/powerpc/platforms/pseries/hvconsole.c  +0 -0
arch/powerpc/platforms/pseries/hvcserver.c  +0 -0
arch/powerpc/platforms/pseries/setup.c  +24 -2
arch/ppc64/Kconfig  +0 -520
arch/ppc64/kernel/Makefile  +1 -40
arch/ppc64/kernel/asm-offsets.c  +0 -195
arch/ppc64/kernel/btext.c  +0 -792
arch/ppc64/kernel/head.S  +0 -2007
arch/ppc64/kernel/misc.S  +0 -940
arch/ppc64/kernel/ppc_ksyms.c  +0 -76
arch/ppc64/kernel/prom.c  +0 -1956
arch/ppc64/kernel/prom_init.c  +0 -2051
arch/ppc64/kernel/rtc.c  +0 -358
arch/ppc64/kernel/semaphore.c  +0 -136
arch/ppc64/kernel/vdso.c  +0 -625
arch/ppc64/kernel/vmlinux.lds.S  +0 -151
arch/ppc64/xmon/privinst.h  +0 -64
drivers/char/Kconfig  +1 -1
include/asm-powerpc/btext.h  +0 -0
include/asm-powerpc/delay.h  +13 -6
include/asm-powerpc/eeh.h  +0 -0
include/asm-powerpc/floppy.h  +12 -13
include/asm-powerpc/hvconsole.h  +0 -0
include/asm-powerpc/hvcserver.h  +0 -0
include/asm-powerpc/kexec.h  +1 -0
include/asm-powerpc/machdep.h  +3 -1
include/asm-powerpc/nvram.h  +10 -7
include/asm-powerpc/page.h  +179 -0
include/asm-powerpc/page_32.h  +40 -0
include/asm-powerpc/page_64.h  +174 -0
include/asm-powerpc/serial.h  +7 -12
include/asm-powerpc/vdso_datapage.h  +1 -1
include/asm-ppc/nvram.h  +0 -73
include/asm-ppc64/prom.h  +0 -220
include/asm-ppc64/system.h  +0 -310
arch/powerpc/Kconfig

@@ -261,7 +261,7 @@ config PPC_ISERIES
 config EMBEDDED6xx
 	bool "Embedded 6xx/7xx/7xxx-based board"
-	depends on PPC32
+	depends on PPC32 && BROKEN

 config APUS
 	bool "Amiga-APUS"
@@ -305,7 +305,7 @@ config PPC_PMAC64
 config PPC_PREP
 	bool " PowerPC Reference Platform (PReP) based machines"
-	depends on PPC_MULTIPLATFORM && PPC32
+	depends on PPC_MULTIPLATFORM && PPC32 && BROKEN
 	select PPC_I8259
 	select PPC_INDIRECT_PCI
 	default y
@@ -932,6 +932,7 @@ source "arch/powerpc/oprofile/Kconfig"
 config KPROBES
 	bool "Kprobes (EXPERIMENTAL)"
+	depends on PPC64
 	help
 	  Kprobes allows you to trap at almost any kernel address and
 	  execute a callback function. register_kprobe() establishes
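The KPROBES help text above refers to register_kprobe(). For orientation only (this is not part of the commit), a minimal probe module built on that interface looks roughly like the sketch below; the probed symbol is an arbitrary example, and the .symbol_name field is a convenience added in later kernels, so on a 2.6.15-era tree you would resolve the probe address yourself and assign kp.addr instead.

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/kprobes.h>

/* Probe an arbitrary exported function; the symbol is only an example. */
static struct kprobe kp = {
	.symbol_name = "do_fork",
};

/* Runs just before the probed instruction executes. */
static int handler_pre(struct kprobe *p, struct pt_regs *regs)
{
	printk(KERN_INFO "kprobe hit at %p\n", p->addr);
	return 0;
}

static int __init kprobe_example_init(void)
{
	kp.pre_handler = handler_pre;
	return register_kprobe(&kp);	/* plants the breakpoint */
}

static void __exit kprobe_example_exit(void)
{
	unregister_kprobe(&kp);		/* removes it again */
}

module_init(kprobe_example_init);
module_exit(kprobe_example_exit);
MODULE_LICENSE("GPL");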
arch/powerpc/Makefile

@@ -187,7 +187,7 @@ archprepare: checkbin
 # Temporary hack until we have migrated to asm-powerpc
 include/asm: arch/$(ARCH)/include/asm
-arch/$(ARCH)/include/asm:
+arch/$(ARCH)/include/asm: FORCE
 	$(Q)if [ ! -d arch/$(ARCH)/include ]; then mkdir -p arch/$(ARCH)/include; fi
 	$(Q)ln -fsn $(srctree)/include/asm-$(OLDARCH) arch/$(ARCH)/include/asm
arch/powerpc/configs/pseries_defconfig
 #
 # Automatically generated make config: don't edit
-# Linux kernel version: 2.6.14-rc4
-# Thu Oct 20 08:32:17 2005
+# Linux kernel version: 2.6.15-rc1
+# Mon Nov 14 15:27:00 2005
 #
+CONFIG_PPC64=y
 CONFIG_64BIT=y
+CONFIG_PPC_MERGE=y
 CONFIG_MMU=y
+CONFIG_GENERIC_HARDIRQS=y
 CONFIG_RWSEM_XCHGADD_ALGORITHM=y
 CONFIG_GENERIC_CALIBRATE_DELAY=y
-CONFIG_GENERIC_ISA_DMA=y
+CONFIG_PPC=y
 CONFIG_EARLY_PRINTK=y
 CONFIG_COMPAT=y
+CONFIG_SYSVIPC_COMPAT=y
 CONFIG_SCHED_NO_NO_OMIT_FRAME_POINTER=y
 CONFIG_ARCH_MAY_HAVE_PC_FDC=y
+CONFIG_FORCE_MAX_ZONEORDER=13
+
+#
+# Processor support
+#
+# CONFIG_POWER4_ONLY is not set
+CONFIG_POWER3=y
+CONFIG_POWER4=y
+CONFIG_PPC_FPU=y
+CONFIG_ALTIVEC=y
+CONFIG_PPC_STD_MMU=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=128

 #
 # Code maturity level options
@@ -68,75 +83,103 @@ CONFIG_MODVERSIONS=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_MODULE_SRCVERSION_ALL=y
CONFIG_KMOD=y
CONFIG_KMOD=y
CONFIG_STOP_MACHINE=y
CONFIG_STOP_MACHINE=y
CONFIG_SYSVIPC_COMPAT=y
#
# Block layer
#
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
CONFIG_DEFAULT_AS=y
# CONFIG_DEFAULT_DEADLINE is not set
# CONFIG_DEFAULT_CFQ is not set
# CONFIG_DEFAULT_NOOP is not set
CONFIG_DEFAULT_IOSCHED="anticipatory"
#
#
# Platform support
# Platform support
#
#
# CONFIG_PPC_ISERIES is not set
CONFIG_PPC_MULTIPLATFORM=y
CONFIG_PPC_MULTIPLATFORM=y
# CONFIG_PPC_ISERIES is not set
# CONFIG_EMBEDDED6xx is not set
# CONFIG_APUS is not set
CONFIG_PPC_PSERIES=y
CONFIG_PPC_PSERIES=y
# CONFIG_PPC_BPA is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_PMAC is not set
# CONFIG_PPC_MAPLE is not set
# CONFIG_PPC_MAPLE is not set
CONFIG_PPC=y
# CONFIG_PPC_CELL is not set
CONFIG_PPC64=y
CONFIG_PPC_OF=y
CONFIG_PPC_OF=y
CONFIG_XICS=y
CONFIG_XICS=y
# CONFIG_U3_DART is not set
CONFIG_MPIC=y
CONFIG_MPIC=y
CONFIG_ALTIVEC=y
CONFIG_PPC_RTAS=y
CONFIG_PPC_SPLPAR=y
CONFIG_RTAS_ERROR_LOGGING=y
CONFIG_KEXEC=y
CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m
# CONFIG_MMIO_NVRAM is not set
CONFIG_IBMVIO=y
CONFIG_IBMVIO=y
# CONFIG_U3_DART is not set
# CONFIG_PPC_MPC106 is not set
# CONFIG_BOOTX_TEXT is not set
# CONFIG_GENERIC_TBSYNC is not set
# CONFIG_POWER4_ONLY is not set
# CONFIG_CPU_FREQ is not set
# CONFIG_WANT_EARLY_SERIAL is not set
#
# Kernel options
#
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_FORCE_MAX_ZONEORDER=13
CONFIG_IOMMU_VMERGE=y
CONFIG_IOMMU_VMERGE=y
CONFIG_SMP=y
CONFIG_HOTPLUG_CPU=y
CONFIG_NR_CPUS=128
CONFIG_KEXEC=y
# CONFIG_IRQ_ALL_CPUS is not set
CONFIG_PPC_SPLPAR=y
CONFIG_EEH=y
CONFIG_SCANLOG=m
CONFIG_LPARCFG=y
CONFIG_NUMA=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_SELECT_MEMORY_MODEL=y
CONFIG_ARCH_FLATMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_ENABLE=y
CONFIG_ARCH_DISCONTIGMEM_DEFAULT=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_ENABLE=y
CONFIG_ARCH_SPARSEMEM_DEFAULT=y
CONFIG_SELECT_MEMORY_MODEL=y
CONFIG_SELECT_MEMORY_MODEL=y
# CONFIG_FLATMEM_MANUAL is not set
# CONFIG_FLATMEM_MANUAL is not set
CONFIG_DISCONTIGMEM_MANUAL=y
# CONFIG_DISCONTIGMEM_MANUAL is not set
# CONFIG_SPARSEMEM_MANUAL is not set
CONFIG_SPARSEMEM_MANUAL=y
CONFIG_DISCONTIGMEM=y
CONFIG_SPARSEMEM=y
CONFIG_FLAT_NODE_MEM_MAP=y
CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_NEED_MULTIPLE_NODES=y
CONFIG_HAVE_MEMORY_PRESENT=y
# CONFIG_SPARSEMEM_STATIC is not set
# CONFIG_SPARSEMEM_STATIC is not set
CONFIG_SPARSEMEM_EXTREME=y
# CONFIG_MEMORY_HOTPLUG is not set
CONFIG_SPLIT_PTLOCK_CPUS=4096
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_HAVE_ARCH_EARLY_PFN_TO_NID=y
CONFIG_NODES_SPAN_OTHER_NODES=y
CONFIG_NODES_SPAN_OTHER_NODES=y
CONFIG_NUMA=y
# CONFIG_PPC_64K_PAGES is not set
CONFIG_SCHED_SMT=y
CONFIG_SCHED_SMT=y
CONFIG_PREEMPT_NONE=y
# CONFIG_PREEMPT_VOLUNTARY is not set
# CONFIG_PREEMPT is not set
# CONFIG_PREEMPT_BKL is not set
# CONFIG_HZ_100 is not set
CONFIG_HZ_250=y
# CONFIG_HZ_1000 is not set
CONFIG_HZ=250
CONFIG_EEH=y
CONFIG_GENERIC_HARDIRQS=y
CONFIG_PPC_RTAS=y
CONFIG_RTAS_PROC=y
CONFIG_RTAS_FLASH=m
CONFIG_SCANLOG=m
CONFIG_LPARCFG=y
CONFIG_SECCOMP=y
CONFIG_BINFMT_ELF=y
# CONFIG_BINFMT_MISC is not set
CONFIG_HOTPLUG_CPU=y
CONFIG_PROC_DEVICETREE=y
CONFIG_PROC_DEVICETREE=y
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_CMDLINE_BOOL is not set
# CONFIG_PM is not set
CONFIG_SECCOMP=y
CONFIG_ISA_DMA_API=y
CONFIG_ISA_DMA_API=y
#
#
# Bus Options
# Bus options
#
#
CONFIG_GENERIC_ISA_DMA=y
CONFIG_PPC_I8259=y
# CONFIG_PPC_INDIRECT_PCI is not set
CONFIG_PCI=y
CONFIG_PCI=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCI_DOMAINS=y
CONFIG_PCI_LEGACY_PROC=y
CONFIG_PCI_LEGACY_PROC=y
...
@@ -156,6 +199,7 @@ CONFIG_HOTPLUG_PCI=m
...
@@ -156,6 +199,7 @@ CONFIG_HOTPLUG_PCI=m
# CONFIG_HOTPLUG_PCI_SHPC is not set
# CONFIG_HOTPLUG_PCI_SHPC is not set
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_HOTPLUG_PCI_RPA_DLPAR=m
CONFIG_KERNEL_START=0xc000000000000000
#
#
# Networking
# Networking
...
@@ -197,6 +241,10 @@ CONFIG_TCP_CONG_BIC=y
...
@@ -197,6 +241,10 @@ CONFIG_TCP_CONG_BIC=y
# CONFIG_IPV6 is not set
# CONFIG_IPV6 is not set
CONFIG_NETFILTER=y
CONFIG_NETFILTER=y
# CONFIG_NETFILTER_DEBUG is not set
# CONFIG_NETFILTER_DEBUG is not set
#
# Core Netfilter Configuration
#
CONFIG_NETFILTER_NETLINK=y
CONFIG_NETFILTER_NETLINK=y
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_QUEUE=m
CONFIG_NETFILTER_NETLINK_LOG=m
CONFIG_NETFILTER_NETLINK_LOG=m
...
@@ -299,6 +347,10 @@ CONFIG_LLC=y
...
@@ -299,6 +347,10 @@ CONFIG_LLC=y
# CONFIG_NET_DIVERT is not set
# CONFIG_NET_DIVERT is not set
# CONFIG_ECONET is not set
# CONFIG_ECONET is not set
# CONFIG_WAN_ROUTER is not set
# CONFIG_WAN_ROUTER is not set
#
# QoS and/or fair queueing
#
# CONFIG_NET_SCHED is not set
# CONFIG_NET_SCHED is not set
CONFIG_NET_CLS_ROUTE=y
CONFIG_NET_CLS_ROUTE=y
...
@@ -368,14 +420,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
...
@@ -368,14 +420,6 @@ CONFIG_BLK_DEV_RAM_COUNT=16
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_BLK_DEV_RAM_SIZE=65536
CONFIG_BLK_DEV_INITRD=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_CDROM_PKTCDVD is not set
# CONFIG_CDROM_PKTCDVD is not set
#
# IO Schedulers
#
CONFIG_IOSCHED_NOOP=y
CONFIG_IOSCHED_AS=y
CONFIG_IOSCHED_DEADLINE=y
CONFIG_IOSCHED_CFQ=y
# CONFIG_ATA_OVER_ETH is not set
# CONFIG_ATA_OVER_ETH is not set
#
#
...
@@ -473,6 +517,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
...
@@ -473,6 +517,7 @@ CONFIG_SCSI_ISCSI_ATTRS=m
#
#
# SCSI low-level drivers
# SCSI low-level drivers
#
#
# CONFIG_ISCSI_TCP is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_BLK_DEV_3W_XXXX_RAID is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_3W_9XXX is not set
# CONFIG_SCSI_ACARD is not set
# CONFIG_SCSI_ACARD is not set
...
@@ -559,6 +604,7 @@ CONFIG_DM_MULTIPATH_EMC=m
...
@@ -559,6 +604,7 @@ CONFIG_DM_MULTIPATH_EMC=m
#
#
# Macintosh device drivers
# Macintosh device drivers
#
#
# CONFIG_WINDFARM is not set
#
#
# Network device support
# Network device support
...
@@ -645,7 +691,6 @@ CONFIG_IXGB=m
...
@@ -645,7 +691,6 @@ CONFIG_IXGB=m
# CONFIG_IXGB_NAPI is not set
# CONFIG_IXGB_NAPI is not set
CONFIG_S2IO=m
CONFIG_S2IO=m
# CONFIG_S2IO_NAPI is not set
# CONFIG_S2IO_NAPI is not set
# CONFIG_2BUFF_MODE is not set
#
#
# Token Ring devices
# Token Ring devices
...
@@ -674,6 +719,7 @@ CONFIG_PPP_ASYNC=m
...
@@ -674,6 +719,7 @@ CONFIG_PPP_ASYNC=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_SYNC_TTY=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_DEFLATE=m
CONFIG_PPP_BSDCOMP=m
CONFIG_PPP_BSDCOMP=m
# CONFIG_PPP_MPPE is not set
CONFIG_PPPOE=m
CONFIG_PPPOE=m
# CONFIG_SLIP is not set
# CONFIG_SLIP is not set
# CONFIG_NET_FC is not set
# CONFIG_NET_FC is not set
...
@@ -784,6 +830,8 @@ CONFIG_HVCS=m
...
@@ -784,6 +830,8 @@ CONFIG_HVCS=m
#
#
# CONFIG_WATCHDOG is not set
# CONFIG_WATCHDOG is not set
# CONFIG_RTC is not set
# CONFIG_RTC is not set
CONFIG_GEN_RTC=y
# CONFIG_GEN_RTC_X is not set
# CONFIG_DTLK is not set
# CONFIG_DTLK is not set
# CONFIG_R3964 is not set
# CONFIG_R3964 is not set
# CONFIG_APPLICOM is not set
# CONFIG_APPLICOM is not set
...
@@ -801,6 +849,7 @@ CONFIG_MAX_RAW_DEVS=1024
...
@@ -801,6 +849,7 @@ CONFIG_MAX_RAW_DEVS=1024
# TPM devices
# TPM devices
#
#
# CONFIG_TCG_TPM is not set
# CONFIG_TCG_TPM is not set
# CONFIG_TELCLOCK is not set
#
#
# I2C support
# I2C support
...
@@ -852,6 +901,7 @@ CONFIG_I2C_ALGOBIT=y
...
@@ -852,6 +901,7 @@ CONFIG_I2C_ALGOBIT=y
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_PCF8591 is not set
# CONFIG_SENSORS_RTC8564 is not set
# CONFIG_SENSORS_RTC8564 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_SENSORS_MAX6875 is not set
# CONFIG_RTC_X1205_I2C is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_CORE is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_ALGO is not set
# CONFIG_I2C_DEBUG_BUS is not set
# CONFIG_I2C_DEBUG_BUS is not set
...
@@ -893,7 +943,6 @@ CONFIG_FB=y
...
@@ -893,7 +943,6 @@ CONFIG_FB=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_FILLRECT=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_COPYAREA=y
CONFIG_FB_CFB_IMAGEBLIT=y
CONFIG_FB_CFB_IMAGEBLIT=y
CONFIG_FB_SOFT_CURSOR=y
CONFIG_FB_MACMODES=y
CONFIG_FB_MACMODES=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_MODE_HELPERS=y
CONFIG_FB_TILEBLITTING=y
CONFIG_FB_TILEBLITTING=y
...
@@ -905,6 +954,7 @@ CONFIG_FB_OF=y
...
@@ -905,6 +954,7 @@ CONFIG_FB_OF=y
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_ASILIANT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_IMSTT is not set
# CONFIG_FB_VGA16 is not set
# CONFIG_FB_VGA16 is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_NVIDIA is not set
# CONFIG_FB_RIVA is not set
# CONFIG_FB_RIVA is not set
CONFIG_FB_MATROX=y
CONFIG_FB_MATROX=y
...
@@ -927,7 +977,6 @@ CONFIG_FB_RADEON_I2C=y
...
@@ -927,7 +977,6 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_FB_VOODOO1 is not set
# CONFIG_FB_VOODOO1 is not set
# CONFIG_FB_CYBLA is not set
# CONFIG_FB_CYBLA is not set
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_TRIDENT is not set
# CONFIG_FB_S1D13XXX is not set
# CONFIG_FB_VIRTUAL is not set
# CONFIG_FB_VIRTUAL is not set
#
#
...
@@ -936,6 +985,7 @@ CONFIG_FB_RADEON_I2C=y
...
@@ -936,6 +985,7 @@ CONFIG_FB_RADEON_I2C=y
# CONFIG_VGA_CONSOLE is not set
# CONFIG_VGA_CONSOLE is not set
CONFIG_DUMMY_CONSOLE=y
CONFIG_DUMMY_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
CONFIG_FRAMEBUFFER_CONSOLE=y
# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
# CONFIG_FONTS is not set
# CONFIG_FONTS is not set
CONFIG_FONT_8x8=y
CONFIG_FONT_8x8=y
CONFIG_FONT_8x16=y
CONFIG_FONT_8x16=y
...
@@ -990,12 +1040,15 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
...
@@ -990,12 +1040,15 @@ CONFIG_USB_OHCI_LITTLE_ENDIAN=y
#
#
# USB Device Class drivers
# USB Device Class drivers
#
#
# CONFIG_USB_BLUETOOTH_TTY is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_ACM is not set
# CONFIG_USB_PRINTER is not set
# CONFIG_USB_PRINTER is not set
#
#
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support' may also be needed; see USB_STORAGE Help for more information
# NOTE: USB_STORAGE enables SCSI, and 'SCSI disk support'
#       may also be needed; see USB_STORAGE Help for more information
#
#
CONFIG_USB_STORAGE=y
CONFIG_USB_STORAGE=y
# CONFIG_USB_STORAGE_DEBUG is not set
# CONFIG_USB_STORAGE_DEBUG is not set
...
@@ -1106,6 +1159,7 @@ CONFIG_INFINIBAND_MTHCA=m
...
@@ -1106,6 +1159,7 @@ CONFIG_INFINIBAND_MTHCA=m
# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
# CONFIG_INFINIBAND_MTHCA_DEBUG is not set
CONFIG_INFINIBAND_IPOIB=m
CONFIG_INFINIBAND_IPOIB=m
# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
# CONFIG_INFINIBAND_IPOIB_DEBUG is not set
# CONFIG_INFINIBAND_SRP is not set
#
#
# SN Devices
# SN Devices
...
@@ -1288,10 +1342,25 @@ CONFIG_NLS_ISO8859_1=y
...
@@ -1288,10 +1342,25 @@ CONFIG_NLS_ISO8859_1=y
# CONFIG_NLS_UTF8 is not set
# CONFIG_NLS_UTF8 is not set
#
#
# Profiling support
# Library routines
#
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
#
# Instrumentation Support
#
#
CONFIG_PROFILING=y
CONFIG_PROFILING=y
CONFIG_OPROFILE=y
CONFIG_OPROFILE=y
# CONFIG_KPROBES is not set
#
#
# Kernel hacking
# Kernel hacking
...
@@ -1308,14 +1377,15 @@ CONFIG_DETECT_SOFTLOCKUP=y
...
@@ -1308,14 +1377,15 @@ CONFIG_DETECT_SOFTLOCKUP=y
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_KOBJECT is not set
# CONFIG_DEBUG_INFO is not set
# CONFIG_DEBUG_INFO is not set
CONFIG_DEBUG_FS=y
CONFIG_DEBUG_FS=y
# CONFIG_DEBUG_VM is not set
# CONFIG_RCU_TORTURE_TEST is not set
CONFIG_DEBUG_STACKOVERFLOW=y
CONFIG_DEBUG_STACKOVERFLOW=y
# CONFIG_KPROBES is not set
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUG_STACK_USAGE=y
CONFIG_DEBUGGER=y
CONFIG_DEBUGGER=y
CONFIG_XMON=y
CONFIG_XMON=y
CONFIG_XMON_DEFAULT=y
CONFIG_XMON_DEFAULT=y
# CONFIG_PPCDBG is not set
CONFIG_IRQSTACKS=y
CONFIG_IRQSTACKS=y
# CONFIG_BOOTX_TEXT is not set
#
#
# Security options
# Security options
...
@@ -1355,17 +1425,3 @@ CONFIG_CRYPTO_TEST=m
...
@@ -1355,17 +1425,3 @@ CONFIG_CRYPTO_TEST=m
#
#
# Hardware crypto devices
# Hardware crypto devices
#
#
#
# Library routines
#
CONFIG_CRC_CCITT=m
# CONFIG_CRC16 is not set
CONFIG_CRC32=y
CONFIG_LIBCRC32C=m
CONFIG_ZLIB_INFLATE=y
CONFIG_ZLIB_DEFLATE=m
CONFIG_TEXTSEARCH=y
CONFIG_TEXTSEARCH_KMP=m
CONFIG_TEXTSEARCH_BM=m
CONFIG_TEXTSEARCH_FSM=m
arch/powerpc/kernel/Makefile

@@ -25,7 +25,7 @@ obj-$(CONFIG_PPC_OF) += of_device.o
 procfs-$(CONFIG_PPC64)		:= proc_ppc64.o
 obj-$(CONFIG_PROC_FS)		+= $(procfs-y)
 rtaspci-$(CONFIG_PPC64)		:= rtas_pci.o
-obj-$(CONFIG_PPC_RTAS)		+= rtas.o $(rtaspci-y)
+obj-$(CONFIG_PPC_RTAS)		+= rtas.o rtas-rtc.o $(rtaspci-y)
 obj-$(CONFIG_RTAS_FLASH)	+= rtas_flash.o
 obj-$(CONFIG_RTAS_PROC)		+= rtas-proc.o
 obj-$(CONFIG_LPARCFG)		+= lparcfg.o
@@ -49,12 +49,23 @@ extra-y += vmlinux.lds
 obj-y				+= process.o init_task.o time.o \
 				   prom.o traps.o setup-common.o
 obj-$(CONFIG_PPC32)		+= entry_32.o setup_32.o misc_32.o systbl.o
-obj-$(CONFIG_PPC64)		+= misc_64.o
+obj-$(CONFIG_PPC64)		+= misc_64.o dma_64.o iommu.o
 obj-$(CONFIG_PPC_OF)		+= prom_init.o
 obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
 obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
 obj-$(CONFIG_6xx)		+= idle_6xx.o
 obj-$(CONFIG_SMP)		+= smp.o
+obj-$(CONFIG_KPROBES)		+= kprobes.o
+module-$(CONFIG_PPC64)		+= module_64.o
+obj-$(CONFIG_MODULES)		+= $(module-y)
+pci64-$(CONFIG_PPC64)		+= pci_64.o pci_dn.o pci_iommu.o \
+				   pci_direct_iommu.o iomap.o
+obj-$(CONFIG_PCI)		+= $(pci64-y)
+kexec64-$(CONFIG_PPC64)		+= machine_kexec_64.o
+obj-$(CONFIG_KEXEC)		+= $(kexec64-y)

 ifeq ($(CONFIG_PPC_ISERIES),y)
 $(obj)/head_64.o: $(obj)/lparmap.s
@@ -62,11 +73,8 @@ AFLAGS_head_64.o += -I$(obj)
 endif

 else
-# stuff used from here for ARCH=ppc or ARCH=ppc64
+# stuff used from here for ARCH=ppc
 smpobj-$(CONFIG_SMP)		+= smp.o
-obj-$(CONFIG_PPC64)		+= traps.o process.o init_task.o time.o \
-				   setup-common.o $(smpobj-y)

 endif
arch/powerpc/kernel/asm-offsets.c

@@ -270,13 +270,15 @@ int main(void)
 	DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
 	DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
 	DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
+	DEFINE(TSPC64_TV_SEC, offsetof(struct timespec, tv_sec));
+	DEFINE(TSPC64_TV_NSEC, offsetof(struct timespec, tv_nsec));
 	DEFINE(TSPC32_TV_SEC, offsetof(struct compat_timespec, tv_sec));
 	DEFINE(TSPC32_TV_NSEC, offsetof(struct compat_timespec, tv_nsec));
 #else
 	DEFINE(TVAL32_TV_SEC, offsetof(struct timeval, tv_sec));
 	DEFINE(TVAL32_TV_USEC, offsetof(struct timeval, tv_usec));
-	DEFINE(TSPEC32_TV_SEC, offsetof(struct timespec, tv_sec));
-	DEFINE(TSPEC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
+	DEFINE(TSPC32_TV_SEC, offsetof(struct timespec, tv_sec));
+	DEFINE(TSPC32_TV_NSEC, offsetof(struct timespec, tv_nsec));
 #endif
 	/* timeval/timezone offsets for use by vdso */
 	DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
arch/ppc64/kernel/dma.c → arch/powerpc/kernel/dma_64.c (file moved)
arch/ppc64/kernel/iomap.c → arch/powerpc/kernel/iomap.c (file moved)
arch/ppc64/kernel/iommu.c → arch/powerpc/kernel/iommu.c (file moved)
arch/powerpc/kernel/irq.c

@@ -71,6 +71,11 @@
 #include <asm/paca.h>
 #endif

+int __irq_offset_value;
+#ifdef CONFIG_PPC32
+EXPORT_SYMBOL(__irq_offset_value);
+#endif
+
 static int ppc_spurious_interrupts;

 #if defined(CONFIG_PPC_ISERIES) && defined(CONFIG_SMP)
@@ -98,7 +103,6 @@ extern atomic_t ipi_sent;
 EXPORT_SYMBOL(irq_desc);

 int distribute_irqs = 1;
-int __irq_offset_value;
 u64 ppc64_interrupt_controller;
 #endif	/* CONFIG_PPC64 */
@@ -311,7 +315,6 @@ void __init init_IRQ(void)
 }

 #ifdef CONFIG_PPC64
-#ifndef CONFIG_PPC_ISERIES
 /*
  * Virtual IRQ mapping code, used on systems with XICS interrupt controllers.
  */
@@ -420,8 +423,6 @@ unsigned int real_irq_to_virt_slowpath(unsigned int real_irq)
 }

-#endif	/* CONFIG_PPC_ISERIES */
-
 #ifdef CONFIG_IRQSTACKS
 struct thread_info *softirq_ctx[NR_CPUS];
 struct thread_info *hardirq_ctx[NR_CPUS];
arch/ppc64/kernel/kprobes.c → arch/powerpc/kernel/kprobes.c (file moved)
arch/powerpc/kernel/lparcfg.c
...
@@ -42,32 +42,6 @@
...
@@ -42,32 +42,6 @@
/* #define LPARCFG_DEBUG */
/* #define LPARCFG_DEBUG */
/* find a better place for this function... */
static
void
log_plpar_hcall_return
(
unsigned
long
rc
,
char
*
tag
)
{
if
(
rc
==
0
)
/* success, return */
return
;
/* check for null tag ? */
if
(
rc
==
H_Hardware
)
printk
(
KERN_INFO
"plpar-hcall (%s) failed with hardware fault
\n
"
,
tag
);
else
if
(
rc
==
H_Function
)
printk
(
KERN_INFO
"plpar-hcall (%s) failed; function not allowed
\n
"
,
tag
);
else
if
(
rc
==
H_Authority
)
printk
(
KERN_INFO
"plpar-hcall (%s) failed; not authorized to this function
\n
"
,
tag
);
else
if
(
rc
==
H_Parameter
)
printk
(
KERN_INFO
"plpar-hcall (%s) failed; Bad parameter(s)
\n
"
,
tag
);
else
printk
(
KERN_INFO
"plpar-hcall (%s) failed with unexpected rc(0x%lx)
\n
"
,
tag
,
rc
);
}
static
struct
proc_dir_entry
*
proc_ppc64_lparcfg
;
static
struct
proc_dir_entry
*
proc_ppc64_lparcfg
;
#define LPARCFG_BUFF_SIZE 4096
#define LPARCFG_BUFF_SIZE 4096
...
@@ -172,6 +146,31 @@ static int lparcfg_data(struct seq_file *m, void *v)
...
@@ -172,6 +146,31 @@ static int lparcfg_data(struct seq_file *m, void *v)
/*
/*
* Methods used to fetch LPAR data when running on a pSeries platform.
* Methods used to fetch LPAR data when running on a pSeries platform.
*/
*/
/* find a better place for this function... */
static
void
log_plpar_hcall_return
(
unsigned
long
rc
,
char
*
tag
)
{
if
(
rc
==
0
)
/* success, return */
return
;
/* check for null tag ? */
if
(
rc
==
H_Hardware
)
printk
(
KERN_INFO
"plpar-hcall (%s) failed with hardware fault
\n
"
,
tag
);
else
if
(
rc
==
H_Function
)
printk
(
KERN_INFO
"plpar-hcall (%s) failed; function not allowed
\n
"
,
tag
);
else
if
(
rc
==
H_Authority
)
printk
(
KERN_INFO
"plpar-hcall (%s) failed; not authorized to this function
\n
"
,
tag
);
else
if
(
rc
==
H_Parameter
)
printk
(
KERN_INFO
"plpar-hcall (%s) failed; Bad parameter(s)
\n
"
,
tag
);
else
printk
(
KERN_INFO
"plpar-hcall (%s) failed with unexpected rc(0x%lx)
\n
"
,
tag
,
rc
);
}
/*
/*
* H_GET_PPP hcall returns info in 4 parms.
* H_GET_PPP hcall returns info in 4 parms.
...
...
arch/ppc64/kernel/machine_kexec.c → arch/powerpc/kernel/machine_kexec_64.c
...
@@ -185,8 +185,8 @@ void kexec_copy_flush(struct kimage *image)
...
@@ -185,8 +185,8 @@ void kexec_copy_flush(struct kimage *image)
*/
*/
void
kexec_smp_down
(
void
*
arg
)
void
kexec_smp_down
(
void
*
arg
)
{
{
if
(
ppc_md
.
cpu_irq
_down
)
if
(
ppc_md
.
kexec_cpu
_down
)
ppc_md
.
cpu_irq_down
(
1
);
ppc_md
.
kexec_cpu_down
(
0
,
1
);
local_irq_disable
();
local_irq_disable
();
kexec_smp_wait
();
kexec_smp_wait
();
...
@@ -233,8 +233,8 @@ static void kexec_prepare_cpus(void)
...
@@ -233,8 +233,8 @@ static void kexec_prepare_cpus(void)
}
}
/* after we tell the others to go down */
/* after we tell the others to go down */
if
(
ppc_md
.
cpu_irq
_down
)
if
(
ppc_md
.
kexec_cpu
_down
)
ppc_md
.
cpu_irq_down
(
0
);
ppc_md
.
kexec_cpu_down
(
0
,
0
);
put_cpu
();
put_cpu
();
...
@@ -255,8 +255,8 @@ static void kexec_prepare_cpus(void)
...
@@ -255,8 +255,8 @@ static void kexec_prepare_cpus(void)
* UP to an SMP kernel.
* UP to an SMP kernel.
*/
*/
smp_release_cpus
();
smp_release_cpus
();
if
(
ppc_md
.
cpu_irq
_down
)
if
(
ppc_md
.
kexec_cpu
_down
)
ppc_md
.
cpu_irq_down
(
0
);
ppc_md
.
kexec_cpu_down
(
0
,
0
);
local_irq_disable
();
local_irq_disable
();
}
}
...
@@ -305,3 +305,54 @@ void machine_kexec(struct kimage *image)
...
@@ -305,3 +305,54 @@ void machine_kexec(struct kimage *image)
ppc_md
.
hpte_clear_all
);
ppc_md
.
hpte_clear_all
);
/* NOTREACHED */
/* NOTREACHED */
}
}
/* Values we need to export to the second kernel via the device tree. */
static
unsigned
long
htab_base
,
htab_size
,
kernel_end
;
static
struct
property
htab_base_prop
=
{
.
name
=
"linux,htab-base"
,
.
length
=
sizeof
(
unsigned
long
),
.
value
=
(
unsigned
char
*
)
&
htab_base
,
};
static
struct
property
htab_size_prop
=
{
.
name
=
"linux,htab-size"
,
.
length
=
sizeof
(
unsigned
long
),
.
value
=
(
unsigned
char
*
)
&
htab_size
,
};
static
struct
property
kernel_end_prop
=
{
.
name
=
"linux,kernel-end"
,
.
length
=
sizeof
(
unsigned
long
),
.
value
=
(
unsigned
char
*
)
&
kernel_end
,
};
static
void
__init
export_htab_values
(
void
)
{
struct
device_node
*
node
;
node
=
of_find_node_by_path
(
"/chosen"
);
if
(
!
node
)
return
;
kernel_end
=
__pa
(
_end
);
prom_add_property
(
node
,
&
kernel_end_prop
);
/* On machines with no htab htab_address is NULL */
if
(
NULL
==
htab_address
)
goto
out
;
htab_base
=
__pa
(
htab_address
);
prom_add_property
(
node
,
&
htab_base_prop
);
htab_size
=
1UL
<<
ppc64_pft_size
;
prom_add_property
(
node
,
&
htab_size_prop
);
out:
of_node_put
(
node
);
}
void
__init
kexec_setup
(
void
)
{
export_htab_values
();
}
arch/ppc64/kernel/module.c → arch/powerpc/kernel/module_64.c (file moved)
arch/ppc64/kernel/pci.c → arch/powerpc/kernel/pci_64.c (file moved)
arch/ppc64/kernel/pci_direct_iommu.c → arch/powerpc/kernel/pci_direct_iommu.c (file moved)
arch/ppc64/kernel/pci_dn.c → arch/powerpc/kernel/pci_dn.c (file moved)
arch/ppc64/kernel/pci_iommu.c → arch/powerpc/kernel/pci_iommu.c (file moved)
arch/powerpc/kernel/prom.c

@@ -1368,6 +1368,7 @@ prom_n_addr_cells(struct device_node* np)
 	/* No #address-cells property for the root node, default to 1 */
 	return 1;
 }
+EXPORT_SYMBOL(prom_n_addr_cells);

 int
 prom_n_size_cells(struct device_node* np)
@@ -1383,6 +1384,7 @@ prom_n_size_cells(struct device_node* np)
 	/* No #size-cells property for the root node, default to 1 */
 	return 1;
 }
+EXPORT_SYMBOL(prom_n_size_cells);

 /**
  * Work out the sense (active-low level / active-high edge)
arch/powerpc/kernel/rtas-rtc.c (new file, 0 → 100644)
#include <linux/kernel.h>
#include <linux/time.h>
#include <linux/timer.h>
#include <linux/init.h>
#include <linux/rtc.h>
#include <linux/delay.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/time.h>
#define MAX_RTC_WAIT 5000
/* 5 sec */
#define RTAS_CLOCK_BUSY (-2)
unsigned
long
__init
rtas_get_boot_time
(
void
)
{
int
ret
[
8
];
int
error
,
wait_time
;
unsigned
long
max_wait_tb
;
max_wait_tb
=
get_tb
()
+
tb_ticks_per_usec
*
1000
*
MAX_RTC_WAIT
;
do
{
error
=
rtas_call
(
rtas_token
(
"get-time-of-day"
),
0
,
8
,
ret
);
if
(
error
==
RTAS_CLOCK_BUSY
||
rtas_is_extended_busy
(
error
))
{
wait_time
=
rtas_extended_busy_delay_time
(
error
);
/* This is boot time so we spin. */
udelay
(
wait_time
*
1000
);
error
=
RTAS_CLOCK_BUSY
;
}
}
while
(
error
==
RTAS_CLOCK_BUSY
&&
(
get_tb
()
<
max_wait_tb
));
if
(
error
!=
0
&&
printk_ratelimit
())
{
printk
(
KERN_WARNING
"error: reading the clock failed (%d)
\n
"
,
error
);
return
0
;
}
return
mktime
(
ret
[
0
],
ret
[
1
],
ret
[
2
],
ret
[
3
],
ret
[
4
],
ret
[
5
]);
}
/* NOTE: get_rtc_time will get an error if executed in interrupt context
* and if a delay is needed to read the clock. In this case we just
* silently return without updating rtc_tm.
*/
void
rtas_get_rtc_time
(
struct
rtc_time
*
rtc_tm
)
{
int
ret
[
8
];
int
error
,
wait_time
;
unsigned
long
max_wait_tb
;
max_wait_tb
=
get_tb
()
+
tb_ticks_per_usec
*
1000
*
MAX_RTC_WAIT
;
do
{
error
=
rtas_call
(
rtas_token
(
"get-time-of-day"
),
0
,
8
,
ret
);
if
(
error
==
RTAS_CLOCK_BUSY
||
rtas_is_extended_busy
(
error
))
{
if
(
in_interrupt
()
&&
printk_ratelimit
())
{
memset
(
&
rtc_tm
,
0
,
sizeof
(
struct
rtc_time
));
printk
(
KERN_WARNING
"error: reading clock"
" would delay interrupt
\n
"
);
return
;
/* delay not allowed */
}
wait_time
=
rtas_extended_busy_delay_time
(
error
);
msleep
(
wait_time
);
error
=
RTAS_CLOCK_BUSY
;
}
}
while
(
error
==
RTAS_CLOCK_BUSY
&&
(
get_tb
()
<
max_wait_tb
));
if
(
error
!=
0
&&
printk_ratelimit
())
{
printk
(
KERN_WARNING
"error: reading the clock failed (%d)
\n
"
,
error
);
return
;
}
rtc_tm
->
tm_sec
=
ret
[
5
];
rtc_tm
->
tm_min
=
ret
[
4
];
rtc_tm
->
tm_hour
=
ret
[
3
];
rtc_tm
->
tm_mday
=
ret
[
2
];
rtc_tm
->
tm_mon
=
ret
[
1
]
-
1
;
rtc_tm
->
tm_year
=
ret
[
0
]
-
1900
;
}
int
rtas_set_rtc_time
(
struct
rtc_time
*
tm
)
{
int
error
,
wait_time
;
unsigned
long
max_wait_tb
;
max_wait_tb
=
get_tb
()
+
tb_ticks_per_usec
*
1000
*
MAX_RTC_WAIT
;
do
{
error
=
rtas_call
(
rtas_token
(
"set-time-of-day"
),
7
,
1
,
NULL
,
tm
->
tm_year
+
1900
,
tm
->
tm_mon
+
1
,
tm
->
tm_mday
,
tm
->
tm_hour
,
tm
->
tm_min
,
tm
->
tm_sec
,
0
);
if
(
error
==
RTAS_CLOCK_BUSY
||
rtas_is_extended_busy
(
error
))
{
if
(
in_interrupt
())
return
1
;
/* probably decrementer */
wait_time
=
rtas_extended_busy_delay_time
(
error
);
msleep
(
wait_time
);
error
=
RTAS_CLOCK_BUSY
;
}
}
while
(
error
==
RTAS_CLOCK_BUSY
&&
(
get_tb
()
<
max_wait_tb
));
if
(
error
!=
0
&&
printk_ratelimit
())
printk
(
KERN_WARNING
"error: setting the clock failed (%d)
\n
"
,
error
);
return
0
;
}
arch/powerpc/kernel/setup_32.c

@@ -57,10 +57,6 @@ extern void power4_idle(void);
 boot_infos_t *boot_infos;
 struct ide_machdep_calls ppc_ide_md;

-/* XXX should go elsewhere */
-int __irq_offset_value;
-EXPORT_SYMBOL(__irq_offset_value);
-
 int boot_cpuid;
 EXPORT_SYMBOL_GPL(boot_cpuid);
 int boot_cpuid_phys;
arch/powerpc/kernel/setup_64.c

@@ -59,6 +59,7 @@
 #include <asm/firmware.h>
 #include <asm/xmon.h>
 #include <asm/udbg.h>
+#include <asm/kexec.h>

 #include "setup.h"
@@ -415,6 +416,10 @@ void __init setup_system(void)
 	 */
 	unflatten_device_tree();

+#ifdef CONFIG_KEXEC
+	kexec_setup();	/* requires unflattened device tree. */
+#endif
+
 	/*
 	 * Fill the ppc64_caches & systemcfg structures with informations
 	 * retreived from the device-tree. Need to be called before
arch/powerpc/kernel/signal_32.c
...
@@ -403,8 +403,6 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
...
@@ -403,8 +403,6 @@ static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
ELF_NFPREG
*
sizeof
(
double
)))
ELF_NFPREG
*
sizeof
(
double
)))
return
1
;
return
1
;
current
->
thread
.
fpscr
.
val
=
0
;
/* turn off all fp exceptions */
#ifdef CONFIG_ALTIVEC
#ifdef CONFIG_ALTIVEC
/* save altivec registers */
/* save altivec registers */
if
(
current
->
thread
.
used_vr
)
{
if
(
current
->
thread
.
used_vr
)
{
...
@@ -818,6 +816,9 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
...
@@ -818,6 +816,9 @@ static int handle_rt_signal(unsigned long sig, struct k_sigaction *ka,
goto
badframe
;
goto
badframe
;
regs
->
link
=
(
unsigned
long
)
frame
->
tramp
;
regs
->
link
=
(
unsigned
long
)
frame
->
tramp
;
}
}
current
->
thread
.
fpscr
.
val
=
0
;
/* turn off all fp exceptions */
if
(
put_user
(
regs
->
gpr
[
1
],
(
u32
__user
*
)
newsp
))
if
(
put_user
(
regs
->
gpr
[
1
],
(
u32
__user
*
)
newsp
))
goto
badframe
;
goto
badframe
;
regs
->
gpr
[
1
]
=
newsp
;
regs
->
gpr
[
1
]
=
newsp
;
...
@@ -1097,6 +1098,8 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
...
@@ -1097,6 +1098,8 @@ static int handle_signal(unsigned long sig, struct k_sigaction *ka,
regs
->
link
=
(
unsigned
long
)
frame
->
mctx
.
tramp
;
regs
->
link
=
(
unsigned
long
)
frame
->
mctx
.
tramp
;
}
}
current
->
thread
.
fpscr
.
val
=
0
;
/* turn off all fp exceptions */
if
(
put_user
(
regs
->
gpr
[
1
],
(
u32
__user
*
)
newsp
))
if
(
put_user
(
regs
->
gpr
[
1
],
(
u32
__user
*
)
newsp
))
goto
badframe
;
goto
badframe
;
regs
->
gpr
[
1
]
=
newsp
;
regs
->
gpr
[
1
]
=
newsp
;
...
...
arch/powerpc/kernel/signal_64.c
...
@@ -131,9 +131,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
...
@@ -131,9 +131,6 @@ static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
flush_fp_to_thread
(
current
);
flush_fp_to_thread
(
current
);
/* Make sure signal doesn't get spurrious FP exceptions */
current
->
thread
.
fpscr
.
val
=
0
;
#ifdef CONFIG_ALTIVEC
#ifdef CONFIG_ALTIVEC
err
|=
__put_user
(
v_regs
,
&
sc
->
v_regs
);
err
|=
__put_user
(
v_regs
,
&
sc
->
v_regs
);
...
@@ -423,6 +420,9 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
...
@@ -423,6 +420,9 @@ static int setup_rt_frame(int signr, struct k_sigaction *ka, siginfo_t *info,
if
(
err
)
if
(
err
)
goto
badframe
;
goto
badframe
;
/* Make sure signal handler doesn't get spurious FP exceptions */
current
->
thread
.
fpscr
.
val
=
0
;
/* Set up to return from userspace. */
/* Set up to return from userspace. */
if
(
vdso64_rt_sigtramp
&&
current
->
thread
.
vdso_base
)
{
if
(
vdso64_rt_sigtramp
&&
current
->
thread
.
vdso_base
)
{
regs
->
link
=
current
->
thread
.
vdso_base
+
vdso64_rt_sigtramp
;
regs
->
link
=
current
->
thread
.
vdso_base
+
vdso64_rt_sigtramp
;
...
...
arch/powerpc/kernel/vdso32/datapage.S

@@ -77,8 +77,9 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
 	mflr	r12
   .cfi_register lr,r12
 	bl	__get_datapage@local
-	lwz	r3,CFG_TB_TICKS_PER_SEC(r3)
 	lwz	r4,(CFG_TB_TICKS_PER_SEC + 4)(r3)
+	lwz	r3,CFG_TB_TICKS_PER_SEC(r3)
 	mtlr	r12
+	blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_get_tbfreq)
arch/powerpc/kernel/vdso32/gettimeofday.S
...
@@ -83,7 +83,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
...
@@ -83,7 +83,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/
*
Check
for
supported
clock
IDs
*/
/
*
Check
for
supported
clock
IDs
*/
cmpli
cr0
,
r3
,
CLOCK_REALTIME
cmpli
cr0
,
r3
,
CLOCK_REALTIME
cmpli
cr1
,
r3
,
CLOCK_MONOTONIC
cmpli
cr1
,
r3
,
CLOCK_MONOTONIC
cror
cr0
,
cr0
,
cr1
cror
cr0
*
4
+
eq
,
cr0
*
4
+
eq
,
cr1
*
4
+
eq
bne
cr0
,
99
f
bne
cr0
,
99
f
mflr
r12
/*
r12
saves
lr
*/
mflr
r12
/*
r12
saves
lr
*/
...
@@ -91,7 +91,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
...
@@ -91,7 +91,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
mr
r10
,
r3
/*
r10
saves
id
*/
mr
r10
,
r3
/*
r10
saves
id
*/
mr
r11
,
r4
/*
r11
saves
tp
*/
mr
r11
,
r4
/*
r11
saves
tp
*/
bl
__get_datapage
@
local
/*
get
data
page
*/
bl
__get_datapage
@
local
/*
get
data
page
*/
mr
r9
,
r3
/*
datapage
ptr
in
r9
*/
mr
r9
,
r3
/*
datapage
ptr
in
r9
*/
beq
cr1
,
50
f
/*
if
monotonic
->
jump
there
*/
beq
cr1
,
50
f
/*
if
monotonic
->
jump
there
*/
/
*
/
*
...
@@ -173,10 +173,14 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
...
@@ -173,10 +173,14 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
add
r4
,
r4
,
r7
add
r4
,
r4
,
r7
lis
r5
,
NSEC_PER_SEC
@
h
lis
r5
,
NSEC_PER_SEC
@
h
ori
r5
,
r5
,
NSEC_PER_SEC
@
l
ori
r5
,
r5
,
NSEC_PER_SEC
@
l
cmpli
cr0
,
r4
,
r5
cmpl
cr0
,
r4
,
r5
cmpli
cr1
,
r4
,
0
blt
1
f
blt
1
f
subf
r4
,
r5
,
r4
subf
r4
,
r5
,
r4
addi
r3
,
r3
,
1
addi
r3
,
r3
,
1
1
:
bge
cr1
,
1
f
addi
r3
,
r3
,-
1
add
r4
,
r4
,
r5
1
:
stw
r3
,
TSPC32_TV_SEC
(
r11
)
1
:
stw
r3
,
TSPC32_TV_SEC
(
r11
)
stw
r4
,
TSPC32_TV_NSEC
(
r11
)
stw
r4
,
TSPC32_TV_NSEC
(
r11
)
...
@@ -210,7 +214,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
...
@@ -210,7 +214,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
/
*
Check
for
supported
clock
IDs
*/
/
*
Check
for
supported
clock
IDs
*/
cmpwi
cr0
,
r3
,
CLOCK_REALTIME
cmpwi
cr0
,
r3
,
CLOCK_REALTIME
cmpwi
cr1
,
r3
,
CLOCK_MONOTONIC
cmpwi
cr1
,
r3
,
CLOCK_MONOTONIC
cror
cr0
,
cr0
,
cr1
cror
cr0
*
4
+
eq
,
cr0
*
4
+
eq
,
cr1
*
4
+
eq
bne
cr0
,
99
f
bne
cr0
,
99
f
li
r3
,
0
li
r3
,
0
...
...
arch/powerpc/kernel/vdso64/datapage.S

@@ -80,5 +80,6 @@ V_FUNCTION_BEGIN(__kernel_get_tbfreq)
 	bl	V_LOCAL_FUNC(__get_datapage)
 	ld	r3,CFG_TB_TICKS_PER_SEC(r3)
 	mtlr	r12
+	blr
   .cfi_endproc
 V_FUNCTION_END(__kernel_get_tbfreq)
arch/powerpc/kernel/vdso64/gettimeofday.S
/*
/
*
*
Userland
implementation
of
gettimeofday
()
for
64
bits
processes
in
a
*
Userland
implementation
of
gettimeofday
()
for
64
bits
processes
in
a
*
ppc64
kernel
for
use
in
the
vDSO
*
ppc64
kernel
for
use
in
the
vDSO
*
*
...
@@ -68,7 +69,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
...
@@ -68,7 +69,7 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
/
*
Check
for
supported
clock
IDs
*/
/
*
Check
for
supported
clock
IDs
*/
cmpwi
cr0
,
r3
,
CLOCK_REALTIME
cmpwi
cr0
,
r3
,
CLOCK_REALTIME
cmpwi
cr1
,
r3
,
CLOCK_MONOTONIC
cmpwi
cr1
,
r3
,
CLOCK_MONOTONIC
cror
cr0
,
cr0
,
cr1
cror
cr0
*
4
+
eq
,
cr0
*
4
+
eq
,
cr1
*
4
+
eq
bne
cr0
,
99
f
bne
cr0
,
99
f
mflr
r12
/*
r12
saves
lr
*/
mflr
r12
/*
r12
saves
lr
*/
...
@@ -84,16 +85,17 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
...
@@ -84,16 +85,17 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
bl
V_LOCAL_FUNC
(
__do_get_xsec
)
/*
get
xsec
from
tb
&
kernel
*/
bl
V_LOCAL_FUNC
(
__do_get_xsec
)
/*
get
xsec
from
tb
&
kernel
*/
lis
r7
,
0x3b9a
/*
r7
=
1000000000
=
N
SEC_PER_SEC
*/
lis
r7
,
15
/*
r7
=
1000000
=
U
SEC_PER_SEC
*/
ori
r7
,
r7
,
0xca0
0
ori
r7
,
r7
,
1696
0
rldicl
r5
,
r4
,
44
,
20
/*
r5
=
sec
=
xsec
/
XSEC_PER_SEC
*/
rldicl
r5
,
r4
,
44
,
20
/*
r5
=
sec
=
xsec
/
XSEC_PER_SEC
*/
rldicr
r6
,
r5
,
20
,
43
/*
r6
=
sec
*
XSEC_PER_SEC
*/
rldicr
r6
,
r5
,
20
,
43
/*
r6
=
sec
*
XSEC_PER_SEC
*/
std
r5
,
TSPC64_TV_SEC
(
r11
)
/*
store
sec
in
tv
*/
std
r5
,
TSPC64_TV_SEC
(
r11
)
/*
store
sec
in
tv
*/
subf
r0
,
r6
,
r4
/*
r0
=
xsec
=
(
xsec
-
r6
)
*/
subf
r0
,
r6
,
r4
/*
r0
=
xsec
=
(
xsec
-
r6
)
*/
mulld
r0
,
r0
,
r7
/*
nsec
=
(
xsec
*
N
SEC_PER_SEC
)
/
mulld
r0
,
r0
,
r7
/*
usec
=
(
xsec
*
U
SEC_PER_SEC
)
/
*
XSEC_PER_SEC
*
XSEC_PER_SEC
*/
*/
rldicl
r0
,
r0
,
44
,
20
rldicl
r0
,
r0
,
44
,
20
mulli
r0
,
r0
,
1000
/*
nsec
=
usec
*
1000
*/
std
r0
,
TSPC64_TV_NSEC
(
r11
)
/*
store
nsec
in
tp
*/
std
r0
,
TSPC64_TV_NSEC
(
r11
)
/*
store
nsec
in
tp
*/
mtlr
r12
mtlr
r12
...
@@ -106,15 +108,16 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
...
@@ -106,15 +108,16 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
50
:
bl
V_LOCAL_FUNC
(
__do_get_xsec
)
/*
get
xsec
from
tb
&
kernel
*/
50
:
bl
V_LOCAL_FUNC
(
__do_get_xsec
)
/*
get
xsec
from
tb
&
kernel
*/
lis
r7
,
0x3b9a
/*
r7
=
1000000000
=
N
SEC_PER_SEC
*/
lis
r7
,
15
/*
r7
=
1000000
=
U
SEC_PER_SEC
*/
ori
r7
,
r7
,
0xca0
0
ori
r7
,
r7
,
1696
0
rldicl
r5
,
r4
,
44
,
20
/*
r5
=
sec
=
xsec
/
XSEC_PER_SEC
*/
rldicl
r5
,
r4
,
44
,
20
/*
r5
=
sec
=
xsec
/
XSEC_PER_SEC
*/
rldicr
r6
,
r5
,
20
,
43
/*
r6
=
sec
*
XSEC_PER_SEC
*/
rldicr
r6
,
r5
,
20
,
43
/*
r6
=
sec
*
XSEC_PER_SEC
*/
subf
r0
,
r6
,
r4
/*
r0
=
xsec
=
(
xsec
-
r6
)
*/
subf
r0
,
r6
,
r4
/*
r0
=
xsec
=
(
xsec
-
r6
)
*/
mulld
r0
,
r0
,
r7
/*
nsec
=
(
xsec
*
N
SEC_PER_SEC
)
/
mulld
r0
,
r0
,
r7
/*
usec
=
(
xsec
*
U
SEC_PER_SEC
)
/
*
XSEC_PER_SEC
*
XSEC_PER_SEC
*/
*/
rldicl
r6
,
r0
,
44
,
20
rldicl
r6
,
r0
,
44
,
20
mulli
r6
,
r6
,
1000
/*
nsec
=
usec
*
1000
*/
/
*
now
we
must
fixup
using
wall
to
monotonic
.
We
need
to
snapshot
/
*
now
we
must
fixup
using
wall
to
monotonic
.
We
need
to
snapshot
*
that
value
and
do
the
counter
trick
again
.
Fortunately
,
we
still
*
that
value
and
do
the
counter
trick
again
.
Fortunately
,
we
still
...
@@ -123,8 +126,8 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
...
@@ -123,8 +126,8 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
*
can
be
used
*
can
be
used
*/
*/
lw
z
r4
,
WTOM_CLOCK_SEC
(
r9
)
lw
a
r4
,
WTOM_CLOCK_SEC
(
r3
)
lw
z
r7
,
WTOM_CLOCK_NSEC
(
r9
)
lw
a
r7
,
WTOM_CLOCK_NSEC
(
r3
)
/
*
We
now
have
our
result
in
r4
,
r7
.
We
create
a
fake
dependency
/
*
We
now
have
our
result
in
r4
,
r7
.
We
create
a
fake
dependency
*
on
that
result
and
re
-
check
the
counter
*
on
that
result
and
re
-
check
the
counter
...
@@ -144,10 +147,14 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
...
@@ -144,10 +147,14 @@ V_FUNCTION_BEGIN(__kernel_clock_gettime)
add
r7
,
r7
,
r6
add
r7
,
r7
,
r6
lis
r9
,
NSEC_PER_SEC
@
h
lis
r9
,
NSEC_PER_SEC
@
h
ori
r9
,
r9
,
NSEC_PER_SEC
@
l
ori
r9
,
r9
,
NSEC_PER_SEC
@
l
cmpli
cr0
,
r7
,
r9
cmpl
cr0
,
r7
,
r9
cmpli
cr1
,
r7
,
0
blt
1
f
blt
1
f
subf
r7
,
r9
,
r7
subf
r7
,
r9
,
r7
addi
r4
,
r4
,
1
addi
r4
,
r4
,
1
1
:
bge
cr1
,
1
f
addi
r4
,
r4
,-
1
add
r7
,
r7
,
r9
1
:
std
r4
,
TSPC64_TV_SEC
(
r11
)
1
:
std
r4
,
TSPC64_TV_SEC
(
r11
)
std
r7
,
TSPC64_TV_NSEC
(
r11
)
std
r7
,
TSPC64_TV_NSEC
(
r11
)
...
@@ -181,7 +188,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
...
@@ -181,7 +188,7 @@ V_FUNCTION_BEGIN(__kernel_clock_getres)
/
*
Check
for
supported
clock
IDs
*/
/
*
Check
for
supported
clock
IDs
*/
cmpwi
cr0
,
r3
,
CLOCK_REALTIME
cmpwi
cr0
,
r3
,
CLOCK_REALTIME
cmpwi
cr1
,
r3
,
CLOCK_MONOTONIC
cmpwi
cr1
,
r3
,
CLOCK_MONOTONIC
cror
cr0
,
cr0
,
cr1
cror
cr0
*
4
+
eq
,
cr0
*
4
+
eq
,
cr1
*
4
+
eq
bne
cr0
,
99
f
bne
cr0
,
99
f
li
r3
,
0
li
r3
,
0
...
...
arch/powerpc/platforms/iseries/irq.c
...
@@ -42,13 +42,6 @@
...
@@ -42,13 +42,6 @@
#include "irq.h"
#include "irq.h"
#include "call_pci.h"
#include "call_pci.h"
/* This maps virtual irq numbers to real irqs */
unsigned
int
virt_irq_to_real_map
[
NR_IRQS
];
/* The next available virtual irq number */
/* Note: the pcnet32 driver assumes irq numbers < 2 aren't valid. :( */
static
int
next_virtual_irq
=
2
;
static
long
Pci_Interrupt_Count
;
static
long
Pci_Interrupt_Count
;
static
long
Pci_Event_Count
;
static
long
Pci_Event_Count
;
...
@@ -350,26 +343,14 @@ static hw_irq_controller iSeries_IRQ_handler = {
...
@@ -350,26 +343,14 @@ static hw_irq_controller iSeries_IRQ_handler = {
int
__init
iSeries_allocate_IRQ
(
HvBusNumber
busNumber
,
int
__init
iSeries_allocate_IRQ
(
HvBusNumber
busNumber
,
HvSubBusNumber
subBusNumber
,
HvAgentId
deviceId
)
HvSubBusNumber
subBusNumber
,
HvAgentId
deviceId
)
{
{
unsigned
int
realirq
,
virtirq
;
int
virtirq
;
unsigned
int
realirq
;
u8
idsel
=
(
deviceId
>>
4
);
u8
idsel
=
(
deviceId
>>
4
);
u8
function
=
deviceId
&
7
;
u8
function
=
deviceId
&
7
;
virtirq
=
next_virtual_irq
++
;
realirq
=
((
busNumber
-
1
)
<<
6
)
+
((
idsel
-
1
)
<<
3
)
+
function
;
realirq
=
((
busNumber
-
1
)
<<
6
)
+
((
idsel
-
1
)
<<
3
)
+
function
;
virt
_irq_to_real_map
[
virtirq
]
=
realirq
;
virt
irq
=
virt_irq_create_mapping
(
realirq
)
;
irq_desc
[
virtirq
].
handler
=
&
iSeries_IRQ_handler
;
irq_desc
[
virtirq
].
handler
=
&
iSeries_IRQ_handler
;
return
virtirq
;
return
virtirq
;
}
}
int
virt_irq_create_mapping
(
unsigned
int
real_irq
)
{
BUG
();
/* Don't call this on iSeries, yet */
return
0
;
}
void
virt_irq_init
(
void
)
{
return
;
}
arch/powerpc/platforms/iseries/setup.c
...
@@ -39,7 +39,6 @@
...
@@ -39,7 +39,6 @@
#include <asm/sections.h>
#include <asm/sections.h>
#include <asm/iommu.h>
#include <asm/iommu.h>
#include <asm/firmware.h>
#include <asm/firmware.h>
#include <asm/systemcfg.h>
#include <asm/system.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/time.h>
#include <asm/paca.h>
#include <asm/paca.h>
...
@@ -548,8 +547,6 @@ static unsigned long __init build_iSeries_Memory_Map(void)
...
@@ -548,8 +547,6 @@ static unsigned long __init build_iSeries_Memory_Map(void)
*/
*/
static
void
__init
iSeries_setup_arch
(
void
)
static
void
__init
iSeries_setup_arch
(
void
)
{
{
unsigned
procIx
=
get_paca
()
->
lppaca
.
dyn_hv_phys_proc_index
;
if
(
get_paca
()
->
lppaca
.
shared_proc
)
{
if
(
get_paca
()
->
lppaca
.
shared_proc
)
{
ppc_md
.
idle_loop
=
iseries_shared_idle
;
ppc_md
.
idle_loop
=
iseries_shared_idle
;
printk
(
KERN_INFO
"Using shared processor idle loop
\n
"
);
printk
(
KERN_INFO
"Using shared processor idle loop
\n
"
);
...
@@ -565,9 +562,6 @@ static void __init iSeries_setup_arch(void)
...
@@ -565,9 +562,6 @@ static void __init iSeries_setup_arch(void)
itVpdAreas
.
xSlicMaxLogicalProcs
);
itVpdAreas
.
xSlicMaxLogicalProcs
);
printk
(
"Max physical processors = %d
\n
"
,
printk
(
"Max physical processors = %d
\n
"
,
itVpdAreas
.
xSlicMaxPhysicalProcs
);
itVpdAreas
.
xSlicMaxPhysicalProcs
);
_systemcfg
->
processor
=
xIoHriProcessorVpd
[
procIx
].
xPVR
;
printk
(
"Processor version = %x
\n
"
,
_systemcfg
->
processor
);
}
}
static
void
iSeries_show_cpuinfo
(
struct
seq_file
*
m
)
static
void
iSeries_show_cpuinfo
(
struct
seq_file
*
m
)
...
...
arch/powerpc/platforms/powermac/time.c
...
@@ -102,7 +102,7 @@ static unsigned long from_rtc_time(struct rtc_time *tm)
...
@@ -102,7 +102,7 @@ static unsigned long from_rtc_time(struct rtc_time *tm)
static
unsigned
long
cuda_get_time
(
void
)
static
unsigned
long
cuda_get_time
(
void
)
{
{
struct
adb_request
req
;
struct
adb_request
req
;
unsigned
long
now
;
unsigned
int
now
;
if
(
cuda_request
(
&
req
,
NULL
,
2
,
CUDA_PACKET
,
CUDA_GET_TIME
)
<
0
)
if
(
cuda_request
(
&
req
,
NULL
,
2
,
CUDA_PACKET
,
CUDA_GET_TIME
)
<
0
)
return
0
;
return
0
;
...
@@ -113,7 +113,7 @@ static unsigned long cuda_get_time(void)
...
@@ -113,7 +113,7 @@ static unsigned long cuda_get_time(void)
req
.
reply_len
);
req
.
reply_len
);
now
=
(
req
.
reply
[
3
]
<<
24
)
+
(
req
.
reply
[
4
]
<<
16
)
now
=
(
req
.
reply
[
3
]
<<
24
)
+
(
req
.
reply
[
4
]
<<
16
)
+
(
req
.
reply
[
5
]
<<
8
)
+
req
.
reply
[
6
];
+
(
req
.
reply
[
5
]
<<
8
)
+
req
.
reply
[
6
];
return
now
-
RTC_OFFSET
;
return
((
unsigned
long
)
now
)
-
RTC_OFFSET
;
}
}
#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
#define cuda_get_rtc_time(tm) to_rtc_time(cuda_get_time(), (tm))
...
@@ -146,7 +146,7 @@ static int cuda_set_rtc_time(struct rtc_time *tm)
...
@@ -146,7 +146,7 @@ static int cuda_set_rtc_time(struct rtc_time *tm)
static
unsigned
long
pmu_get_time
(
void
)
static
unsigned
long
pmu_get_time
(
void
)
{
{
struct
adb_request
req
;
struct
adb_request
req
;
unsigned
long
now
;
unsigned
int
now
;
if
(
pmu_request
(
&
req
,
NULL
,
1
,
PMU_READ_RTC
)
<
0
)
if
(
pmu_request
(
&
req
,
NULL
,
1
,
PMU_READ_RTC
)
<
0
)
return
0
;
return
0
;
...
@@ -156,7 +156,7 @@ static unsigned long pmu_get_time(void)
...
@@ -156,7 +156,7 @@ static unsigned long pmu_get_time(void)
req
.
reply_len
);
req
.
reply_len
);
now
=
(
req
.
reply
[
0
]
<<
24
)
+
(
req
.
reply
[
1
]
<<
16
)
now
=
(
req
.
reply
[
0
]
<<
24
)
+
(
req
.
reply
[
1
]
<<
16
)
+
(
req
.
reply
[
2
]
<<
8
)
+
req
.
reply
[
3
];
+
(
req
.
reply
[
2
]
<<
8
)
+
req
.
reply
[
3
];
return
now
-
RTC_OFFSET
;
return
((
unsigned
long
)
now
)
-
RTC_OFFSET
;
}
}
#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
#define pmu_get_rtc_time(tm) to_rtc_time(pmu_get_time(), (tm))
...
@@ -199,6 +199,7 @@ static unsigned long smu_get_time(void)
...
@@ -199,6 +199,7 @@ static unsigned long smu_get_time(void)
#define smu_set_rtc_time(tm, spin) 0
#define smu_set_rtc_time(tm, spin) 0
#endif
#endif
/* Can't be __init, it's called when suspending and resuming */
unsigned
long
pmac_get_boot_time
(
void
)
unsigned
long
pmac_get_boot_time
(
void
)
{
{
/* Get the time from the RTC, used only at boot time */
/* Get the time from the RTC, used only at boot time */
...
...
arch/powerpc/platforms/pseries/Makefile

@@ -5,3 +5,6 @@ obj-$(CONFIG_IBMVIO) += vio.o
 obj-$(CONFIG_XICS)	+= xics.o
 obj-$(CONFIG_SCANLOG)	+= scanlog.o
 obj-$(CONFIG_EEH)	+= eeh.o eeh_event.o
+obj-$(CONFIG_HVC_CONSOLE)	+= hvconsole.o
+obj-$(CONFIG_HVCS)	+= hvcserver.o
arch/ppc64/kernel/hvconsole.c → arch/powerpc/platforms/pseries/hvconsole.c (file moved)
arch/ppc64/kernel/hvcserver.c → arch/powerpc/platforms/pseries/hvcserver.c (file moved)
arch/powerpc/platforms/pseries/setup.c
...
@@ -200,14 +200,12 @@ static void __init pSeries_setup_arch(void)
...
@@ -200,14 +200,12 @@ static void __init pSeries_setup_arch(void)
if
(
ppc64_interrupt_controller
==
IC_OPEN_PIC
)
{
if
(
ppc64_interrupt_controller
==
IC_OPEN_PIC
)
{
ppc_md
.
init_IRQ
=
pSeries_init_mpic
;
ppc_md
.
init_IRQ
=
pSeries_init_mpic
;
ppc_md
.
get_irq
=
mpic_get_irq
;
ppc_md
.
get_irq
=
mpic_get_irq
;
ppc_md
.
cpu_irq_down
=
mpic_teardown_this_cpu
;
/* Allocate the mpic now, so that find_and_init_phbs() can
/* Allocate the mpic now, so that find_and_init_phbs() can
* fill the ISUs */
* fill the ISUs */
pSeries_setup_mpic
();
pSeries_setup_mpic
();
}
else
{
}
else
{
ppc_md
.
init_IRQ
=
xics_init_IRQ
;
ppc_md
.
init_IRQ
=
xics_init_IRQ
;
ppc_md
.
get_irq
=
xics_get_irq
;
ppc_md
.
get_irq
=
xics_get_irq
;
ppc_md
.
cpu_irq_down
=
xics_teardown_cpu
;
}
}
#ifdef CONFIG_SMP
#ifdef CONFIG_SMP
...
@@ -595,6 +593,27 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
...
@@ -595,6 +593,27 @@ static int pSeries_pci_probe_mode(struct pci_bus *bus)
return
PCI_PROBE_NORMAL
;
return
PCI_PROBE_NORMAL
;
}
}
#ifdef CONFIG_KEXEC
static
void
pseries_kexec_cpu_down
(
int
crash_shutdown
,
int
secondary
)
{
/* Don't risk a hypervisor call if we're crashing */
if
(
!
crash_shutdown
)
{
unsigned
long
vpa
=
__pa
(
&
get_paca
()
->
lppaca
);
if
(
unregister_vpa
(
hard_smp_processor_id
(),
vpa
))
{
printk
(
"VPA deregistration of cpu %u (hw_cpu_id %d) "
"failed
\n
"
,
smp_processor_id
(),
hard_smp_processor_id
());
}
}
if
(
ppc64_interrupt_controller
==
IC_OPEN_PIC
)
mpic_teardown_this_cpu
(
secondary
);
else
xics_teardown_cpu
(
secondary
);
}
#endif
struct
machdep_calls
__initdata
pSeries_md
=
{
struct
machdep_calls
__initdata
pSeries_md
=
{
.
probe
=
pSeries_probe
,
.
probe
=
pSeries_probe
,
.
setup_arch
=
pSeries_setup_arch
,
.
setup_arch
=
pSeries_setup_arch
,
...
@@ -617,4 +636,7 @@ struct machdep_calls __initdata pSeries_md = {
...
@@ -617,4 +636,7 @@ struct machdep_calls __initdata pSeries_md = {
.
check_legacy_ioport
=
pSeries_check_legacy_ioport
,
.
check_legacy_ioport
=
pSeries_check_legacy_ioport
,
.
system_reset_exception
=
pSeries_system_reset_exception
,
.
system_reset_exception
=
pSeries_system_reset_exception
,
.
machine_check_exception
=
pSeries_machine_check_exception
,
.
machine_check_exception
=
pSeries_machine_check_exception
,
#ifdef CONFIG_KEXEC
.
kexec_cpu_down
=
pseries_kexec_cpu_down
,
#endif
};
};
arch/ppc64/Kconfig
deleted 100644 → 0
View file @ 302fe175
#
# For a description of the syntax of this configuration file,
# see Documentation/kbuild/kconfig-language.txt.
#
config 64BIT
def_bool y
config MMU
bool
default y
config PPC_STD_MMU
def_bool y
config UID16
bool
config RWSEM_GENERIC_SPINLOCK
bool
config RWSEM_XCHGADD_ALGORITHM
bool
default y
config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_ISA_DMA
bool
default y
config EARLY_PRINTK
bool
default y
config COMPAT
bool
default y
config SCHED_NO_NO_OMIT_FRAME_POINTER
bool
default y
config ARCH_MAY_HAVE_PC_FDC
bool
default y
config PPC_STD_MMU
bool
default y
# We optimistically allocate largepages from the VM, so make the limit
# large enough (16MB). This badly named config option is actually
# max order + 1
config FORCE_MAX_ZONEORDER
int
default "9" if PPC_64K_PAGES
default "13"
source "init/Kconfig"
config SYSVIPC_COMPAT
bool
depends on COMPAT && SYSVIPC
default y
menu "Platform support"
choice
prompt "Platform Type"
default PPC_MULTIPLATFORM
config PPC_ISERIES
bool "IBM Legacy iSeries"
config PPC_MULTIPLATFORM
bool "Generic"
endchoice
config PPC_PSERIES
depends on PPC_MULTIPLATFORM
bool " IBM pSeries & new iSeries"
default y
config PPC_BPA
bool " Broadband Processor Architecture"
depends on PPC_MULTIPLATFORM
config PPC_PMAC
depends on PPC_MULTIPLATFORM
bool " Apple G5 based machines"
default y
select U3_DART
select GENERIC_TBSYNC
config PPC_MAPLE
depends on PPC_MULTIPLATFORM
bool " Maple 970FX Evaluation Board"
select U3_DART
select MPIC_BROKEN_U3
select GENERIC_TBSYNC
default n
help
This option enables support for the Maple 970FX Evaluation Board.
For more information, refer to <http://www.970eval.com>
config PPC
bool
default y
config PPC64
bool
default y
config PPC_OF
depends on PPC_MULTIPLATFORM
bool
default y
config XICS
depends on PPC_PSERIES
bool
default y
config MPIC
depends on PPC_PSERIES || PPC_PMAC || PPC_MAPLE
bool
default y
config PPC_I8259
depends on PPC_PSERIES
bool
default y
config BPA_IIC
depends on PPC_BPA
bool
default y
# VMX is pSeries only for now until somebody writes the iSeries
# exception vectors for it
config ALTIVEC
bool "Support for VMX (Altivec) vector unit"
depends on PPC_MULTIPLATFORM
default y
config PPC_SPLPAR
depends on PPC_PSERIES
bool "Support for shared-processor logical partitions"
default n
help
Enabling this option will make the kernel run more efficiently
on logically-partitioned pSeries systems which use shared
processors, that is, which share physical processors between
two or more partitions.
config KEXEC
bool "kexec system call (EXPERIMENTAL)"
depends on PPC_MULTIPLATFORM && EXPERIMENTAL
help
kexec is a system call that implements the ability to shutdown your
current kernel, and to start another kernel. It is like a reboot
but it is independent of the system firmware. And like a reboot
you can start any kernel with it, not just Linux.
The name comes from the similarity to the exec system call.
It is an ongoing process to be certain the hardware in a machine
is properly shutdown, so do not be surprised if this code does not
initially work for you. It may help to enable device hotplugging
support. As of this writing the exact hardware interface is
strongly in flux, so no good recommendation can be made.
source "drivers/cpufreq/Kconfig"
config CPU_FREQ_PMAC64
bool "Support for some Apple G5s"
depends on CPU_FREQ && PMAC_SMU && PPC64
select CPU_FREQ_TABLE
help
This adds support for frequency switching on Apple iMac G5,
and some of the more recent desktop G5 machines as well.
config IBMVIO
depends on PPC_PSERIES || PPC_ISERIES
bool
default y
config U3_DART
bool
depends on PPC_MULTIPLATFORM
default n
config MPIC_BROKEN_U3
bool
depends on PPC_MAPLE
default y
config GENERIC_TBSYNC
def_bool n
config PPC_PMAC64
bool
depends on PPC_PMAC
default y
config BOOTX_TEXT
bool "Support for early boot text console"
depends PPC_OF
help
Say Y here to see progress messages from the boot firmware in text
mode. Requires an Open Firmware compatible video card.
config POWER4
def_bool y
config PPC_FPU
def_bool y
config POWER4_ONLY
bool "Optimize for POWER4"
default n
---help---
Cause the compiler to optimize for POWER4 processors. The resulting
binary will not work on POWER3 or RS64 processors when compiled with
binutils 2.15 or later.
config IOMMU_VMERGE
bool "Enable IOMMU virtual merging (EXPERIMENTAL)"
depends on EXPERIMENTAL
default n
help
Cause IO segments sent to a device for DMA to be merged virtually
by the IOMMU when they happen to have been allocated contiguously.
This doesn't add pressure to the IOMMU allocator. However, some
drivers don't support getting large merged segments coming back
from *_map_sg(). Say Y if you know the drivers you are using are
properly handling this case.
config SMP
bool "Symmetric multi-processing support"
---help---
This enables support for systems with more than one CPU. If you have
a system with only one CPU, say N. If you have a system with more
than one CPU, say Y.
If you say N here, the kernel will run on single and multiprocessor
machines, but will use only one CPU of a multiprocessor machine. If
you say Y here, the kernel will run on single-processor machines.
On a single-processor machine, the kernel will run faster if you say
N here.
If you don't know what to do here, say Y.
config NR_CPUS
int "Maximum number of CPUs (2-128)"
range 2 128
depends on SMP
default "32"
config HMT
bool "Hardware multithreading"
depends on SMP && PPC_PSERIES && BROKEN
help
This option enables hardware multithreading on RS64 cpus.
pSeries systems p620 and p660 have such a cpu type.
config NUMA
bool "NUMA support"
default y if SMP && PPC_PSERIES
config ARCH_SELECT_MEMORY_MODEL
def_bool y
config ARCH_FLATMEM_ENABLE
def_bool y
depends on !NUMA
config ARCH_SPARSEMEM_ENABLE
def_bool y
config ARCH_SPARSEMEM_DEFAULT
def_bool y
depends on NUMA
source "mm/Kconfig"
config HAVE_ARCH_EARLY_PFN_TO_NID
def_bool y
depends on NEED_MULTIPLE_NODES
config ARCH_MEMORY_PROBE
def_bool y
depends on MEMORY_HOTPLUG
# Some NUMA nodes have memory ranges that span
# other nodes. Even though a pfn is valid and
# between a node's start and end pfns, it may not
# reside on that node.
#
# This is a relatively temporary hack that should
# be able to go away when sparsemem is fully in
# place
config NODES_SPAN_OTHER_NODES
def_bool y
depends on NEED_MULTIPLE_NODES
config PPC_64K_PAGES
bool "64k page size"
help
This option changes the kernel logical page size to 64k. On machines
without processor support for 64k pages, the kernel will simulate
them by loading each individual 4k page on demand transparently,
while on hardware with such support, it will be used to map
normal application pages.
config SCHED_SMT
bool "SMT (Hyperthreading) scheduler support"
depends on SMP
default off
help
SMT scheduler support improves the CPU scheduler's decision making
when dealing with POWER5 cpus at a cost of slightly increased
overhead in some places. If unsure say N here.
source "kernel/Kconfig.preempt"
source kernel/Kconfig.hz
config EEH
bool "PCI Extended Error Handling (EEH)" if EMBEDDED
depends on PPC_PSERIES
default y if !EMBEDDED
#
# Use the generic interrupt handling code in kernel/irq/:
#
config GENERIC_HARDIRQS
bool
default y
config PPC_RTAS
bool
depends on PPC_PSERIES || PPC_BPA
default y
config RTAS_ERROR_LOGGING
bool
depends on PPC_RTAS
default y
config RTAS_PROC
bool "Proc interface to RTAS"
depends on PPC_RTAS
default y
config RTAS_FLASH
tristate "Firmware flash interface"
depends on RTAS_PROC
config SCANLOG
tristate "Scanlog dump interface"
depends on RTAS_PROC && PPC_PSERIES
config LPARCFG
tristate "LPAR Configuration Data"
depends on PPC_PSERIES || PPC_ISERIES
help
Provide system capacity information via human readable
<key word>=<value> pairs through a /proc/ppc64/lparcfg interface.
config SECCOMP
bool "Enable seccomp to safely compute untrusted bytecode"
depends on PROC_FS
default y
help
This kernel feature is useful for number crunching applications
that may need to compute untrusted bytecode during their
execution. By using pipes or other transports made available to
the process as file descriptors supporting the read/write
syscalls, it's possible to isolate those applications in
their own address space using seccomp. Once seccomp is
enabled via /proc/<pid>/seccomp, it cannot be disabled
and the task is only allowed to execute a few safe syscalls
defined by each seccomp mode.
If unsure, say Y. Only embedded should say N here.
source "fs/Kconfig.binfmt"
config HOTPLUG_CPU
bool "Support for hot-pluggable CPUs"
depends on SMP && EXPERIMENTAL && (PPC_PSERIES || PPC_PMAC)
select HOTPLUG
---help---
Say Y here to be able to turn CPUs off and on.
Say N if you are unsure.
config PROC_DEVICETREE
bool "Support for Open Firmware device tree in /proc"
help
This option adds a device-tree directory under /proc which contains
an image of the device tree that the kernel copies from Open
Firmware. If unsure, say Y here.
config CMDLINE_BOOL
bool "Default bootloader kernel arguments"
depends on !PPC_ISERIES
config CMDLINE
string "Initial kernel command string"
depends on CMDLINE_BOOL
default "console=ttyS0,9600 console=tty0 root=/dev/sda2"
help
On some platforms, there is currently no way for the boot loader to
pass arguments to the kernel. For these platforms, you can supply
some command-line options at build time by entering them here. In
most cases you will need to specify the root device here.
endmenu
config ISA_DMA_API
bool
default y
menu "Bus Options"
config ISA
bool
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. If you have an Apple machine, say N here; if you
have an IBM RS/6000 or pSeries machine or a PReP machine, say Y. If
you have an embedded board, consult your board documentation.
config SBUS
bool
config MCA
bool
config EISA
bool
config PCI
bool "support for PCI devices" if (EMBEDDED && PPC_ISERIES)
default y
help
Find out whether your system includes a PCI bus. PCI is the name of
a bus system, i.e. the way the CPU talks to the other stuff inside
your box. If you say Y here, the kernel will include drivers and
infrastructure code to support PCI bus devices.
config PCI_DOMAINS
bool
default PCI
source "drivers/pci/Kconfig"
source "drivers/pcmcia/Kconfig"
source "drivers/pci/hotplug/Kconfig"
endmenu
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
menu "iSeries device drivers"
depends on PPC_ISERIES
config VIOCONS
tristate "iSeries Virtual Console Support"
config VIODASD
tristate "iSeries Virtual I/O disk support"
help
If you are running on an iSeries system and you want to use
virtual disks created and managed by OS/400, say Y.
config VIOCD
tristate "iSeries Virtual I/O CD support"
help
If you are running Linux on an IBM iSeries system and you want to
read a CD drive owned by OS/400, say Y here.
config VIOTAPE
tristate "iSeries Virtual Tape Support"
help
If you are running Linux on an iSeries system and you want Linux
to read and/or write a tape drive owned by OS/400, say Y here.
endmenu
config VIOPATH
bool
depends on VIOCONS || VIODASD || VIOCD || VIOTAPE || VETH
default y
source "arch/powerpc/oprofile/Kconfig"
source "arch/ppc64/Kconfig.debug"
source "security/Kconfig"
config KEYS_COMPAT
bool
depends on COMPAT && KEYS
default y
source "crypto/Kconfig"
source "lib/Kconfig"
arch/ppc64/kernel/Makefile
View file @ 0174f72f
...
@@ -2,45 +2,6 @@
# Makefile for the linux ppc64 kernel.
#
ifneq ($(CONFIG_PPC_MERGE),y)
obj-y			+= idle.o align.o
EXTRA_CFLAGS		+= -mno-minimal-toc
extra-y			:= head.o vmlinux.lds
obj-y			:= misc.o prom.o
endif
obj-y			+= idle.o dma.o \
			   align.o \
			   rtc.o \
			   iommu.o
pci-obj-$(CONFIG_PPC_MULTIPLATFORM)	+= pci_dn.o pci_direct_iommu.o
obj-$(CONFIG_PCI)	+= pci.o pci_iommu.o iomap.o $(pci-obj-y)
obj-$(CONFIG_PPC_MULTIPLATFORM)	+= nvram.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_PPC_MULTIPLATFORM)	+= prom_init.o
endif
obj-$(CONFIG_KEXEC)		+= machine_kexec.o
obj-$(CONFIG_MODULES)		+= module.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_MODULES)		+= ppc_ksyms.o
endif
obj-$(CONFIG_HVC_CONSOLE)	+= hvconsole.o
ifneq ($(CONFIG_PPC_MERGE),y)
obj-$(CONFIG_BOOTX_TEXT)	+= btext.o
endif
obj-$(CONFIG_HVCS)		+= hvcserver.o
obj-$(CONFIG_KPROBES)		+= kprobes.o
ifneq ($(CONFIG_PPC_MERGE),y)
ifeq ($(CONFIG_PPC_ISERIES),y)
arch/ppc64/kernel/head.o: arch/powerpc/kernel/lparmap.s
AFLAGS_head.o += -Iarch/powerpc/kernel
endif
endif
arch/ppc64/kernel/asm-offsets.c
deleted 100644 → 0
View file @ 302fe175
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/time.h>
#include <linux/hardirq.h>
#include <asm/io.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/processor.h>
#include <asm/paca.h>
#include <asm/lppaca.h>
#include <asm/iseries/hv_lp_event.h>
#include <asm/rtas.h>
#include <asm/cputable.h>
#include <asm/cache.h>
#include <asm/systemcfg.h>
#include <asm/compat.h>
#define DEFINE(sym, val) \
asm volatile("\n->" #sym " %0 " #val : : "i" (val))
#define BLANK() asm volatile("\n->" : : )
int main(void)
{
	/* thread struct on stack */
	DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
	DEFINE(TI_PREEMPT, offsetof(struct thread_info, preempt_count));
	DEFINE(TI_SC_NOERR, offsetof(struct thread_info, syscall_noerror));

	/* task_struct->thread */
	DEFINE(THREAD, offsetof(struct task_struct, thread));
	DEFINE(PT_REGS, offsetof(struct thread_struct, regs));
	DEFINE(THREAD_FPEXC_MODE, offsetof(struct thread_struct, fpexc_mode));
	DEFINE(THREAD_FPR0, offsetof(struct thread_struct, fpr[0]));
	DEFINE(THREAD_FPSCR, offsetof(struct thread_struct, fpscr));
	DEFINE(KSP, offsetof(struct thread_struct, ksp));
	DEFINE(KSP_VSID, offsetof(struct thread_struct, ksp_vsid));

#ifdef CONFIG_ALTIVEC
	DEFINE(THREAD_VR0, offsetof(struct thread_struct, vr[0]));
	DEFINE(THREAD_VRSAVE, offsetof(struct thread_struct, vrsave));
	DEFINE(THREAD_VSCR, offsetof(struct thread_struct, vscr));
	DEFINE(THREAD_USED_VR, offsetof(struct thread_struct, used_vr));
#endif /* CONFIG_ALTIVEC */
	DEFINE(MM, offsetof(struct task_struct, mm));
	DEFINE(AUDITCONTEXT, offsetof(struct task_struct, audit_context));

	DEFINE(DCACHEL1LINESIZE, offsetof(struct ppc64_caches, dline_size));
	DEFINE(DCACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_dline_size));
	DEFINE(DCACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, dlines_per_page));
	DEFINE(ICACHEL1LINESIZE, offsetof(struct ppc64_caches, iline_size));
	DEFINE(ICACHEL1LOGLINESIZE, offsetof(struct ppc64_caches, log_iline_size));
	DEFINE(ICACHEL1LINESPERPAGE, offsetof(struct ppc64_caches, ilines_per_page));
	DEFINE(PLATFORM_LPAR, PLATFORM_LPAR);

	/* paca */
	DEFINE(PACA_SIZE, sizeof(struct paca_struct));
	DEFINE(PACAPACAINDEX, offsetof(struct paca_struct, paca_index));
	DEFINE(PACAPROCSTART, offsetof(struct paca_struct, cpu_start));
	DEFINE(PACAKSAVE, offsetof(struct paca_struct, kstack));
	DEFINE(PACACURRENT, offsetof(struct paca_struct, __current));
	DEFINE(PACASAVEDMSR, offsetof(struct paca_struct, saved_msr));
	DEFINE(PACASTABREAL, offsetof(struct paca_struct, stab_real));
	DEFINE(PACASTABVIRT, offsetof(struct paca_struct, stab_addr));
	DEFINE(PACASTABRR, offsetof(struct paca_struct, stab_rr));
	DEFINE(PACAR1, offsetof(struct paca_struct, saved_r1));
	DEFINE(PACATOC, offsetof(struct paca_struct, kernel_toc));
	DEFINE(PACAPROCENABLED, offsetof(struct paca_struct, proc_enabled));
	DEFINE(PACASLBCACHE, offsetof(struct paca_struct, slb_cache));
	DEFINE(PACASLBCACHEPTR, offsetof(struct paca_struct, slb_cache_ptr));
	DEFINE(PACACONTEXTID, offsetof(struct paca_struct, context.id));
#ifdef CONFIG_PPC_64K_PAGES
	DEFINE(PACAPGDIR, offsetof(struct paca_struct, pgdir));
#endif
#ifdef CONFIG_HUGETLB_PAGE
	DEFINE(PACALOWHTLBAREAS, offsetof(struct paca_struct, context.low_htlb_areas));
	DEFINE(PACAHIGHHTLBAREAS, offsetof(struct paca_struct, context.high_htlb_areas));
#endif /* CONFIG_HUGETLB_PAGE */
	DEFINE(PACADEFAULTDECR, offsetof(struct paca_struct, default_decr));
	DEFINE(PACA_EXGEN, offsetof(struct paca_struct, exgen));
	DEFINE(PACA_EXMC, offsetof(struct paca_struct, exmc));
	DEFINE(PACA_EXSLB, offsetof(struct paca_struct, exslb));
	DEFINE(PACA_EXDSI, offsetof(struct paca_struct, exdsi));
	DEFINE(PACAEMERGSP, offsetof(struct paca_struct, emergency_sp));
	DEFINE(PACALPPACA, offsetof(struct paca_struct, lppaca));
	DEFINE(PACAHWCPUID, offsetof(struct paca_struct, hw_cpu_id));
	DEFINE(LPPACASRR0, offsetof(struct lppaca, saved_srr0));
	DEFINE(LPPACASRR1, offsetof(struct lppaca, saved_srr1));
	DEFINE(LPPACAANYINT, offsetof(struct lppaca, int_dword.any_int));
	DEFINE(LPPACADECRINT, offsetof(struct lppaca, int_dword.fields.decr_int));

	/* RTAS */
	DEFINE(RTASBASE, offsetof(struct rtas_t, base));
	DEFINE(RTASENTRY, offsetof(struct rtas_t, entry));

	/* Interrupt register frame */
	DEFINE(STACK_FRAME_OVERHEAD, STACK_FRAME_OVERHEAD);

	DEFINE(SWITCH_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs));

	/* 288 = # of volatile regs, int & fp, for leaf routines */
	/* which do not stack a frame.  See the PPC64 ABI.  */
	DEFINE(INT_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 288);
	/* Create extra stack space for SRR0 and SRR1 when calling prom/rtas. */
	DEFINE(PROM_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
	DEFINE(RTAS_FRAME_SIZE, STACK_FRAME_OVERHEAD + sizeof(struct pt_regs) + 16);
	DEFINE(GPR0, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[0]));
	DEFINE(GPR1, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[1]));
	DEFINE(GPR2, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[2]));
	DEFINE(GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[3]));
	DEFINE(GPR4, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[4]));
	DEFINE(GPR5, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[5]));
	DEFINE(GPR6, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[6]));
	DEFINE(GPR7, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[7]));
	DEFINE(GPR8, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[8]));
	DEFINE(GPR9, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[9]));
	DEFINE(GPR10, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[10]));
	DEFINE(GPR11, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[11]));
	DEFINE(GPR12, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[12]));
	DEFINE(GPR13, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, gpr[13]));
	/*
	 * Note: these symbols include _ because they overlap with special
	 * register names
	 */
	DEFINE(_NIP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, nip));
	DEFINE(_MSR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, msr));
	DEFINE(_CTR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ctr));
	DEFINE(_LINK, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, link));
	DEFINE(_CCR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, ccr));
	DEFINE(_XER, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, xer));
	DEFINE(_DAR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dar));
	DEFINE(_DSISR, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, dsisr));
	DEFINE(ORIG_GPR3, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, orig_gpr3));
	DEFINE(RESULT, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, result));
	DEFINE(_TRAP, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, trap));
	DEFINE(SOFTE, STACK_FRAME_OVERHEAD+offsetof(struct pt_regs, softe));

	/* These _only_ to be used with {PROM,RTAS}_FRAME_SIZE!!! */
	DEFINE(_SRR0, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs));
	DEFINE(_SRR1, STACK_FRAME_OVERHEAD+sizeof(struct pt_regs)+8);

	DEFINE(CLONE_VM, CLONE_VM);
	DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);

	/* About the CPU features table */
	DEFINE(CPU_SPEC_ENTRY_SIZE, sizeof(struct cpu_spec));
	DEFINE(CPU_SPEC_PVR_MASK, offsetof(struct cpu_spec, pvr_mask));
	DEFINE(CPU_SPEC_PVR_VALUE, offsetof(struct cpu_spec, pvr_value));
	DEFINE(CPU_SPEC_FEATURES, offsetof(struct cpu_spec, cpu_features));
	DEFINE(CPU_SPEC_SETUP, offsetof(struct cpu_spec, cpu_setup));

	/* systemcfg offsets for use by vdso */
	DEFINE(CFG_TB_ORIG_STAMP, offsetof(struct systemcfg, tb_orig_stamp));
	DEFINE(CFG_TB_TICKS_PER_SEC, offsetof(struct systemcfg, tb_ticks_per_sec));
	DEFINE(CFG_TB_TO_XS, offsetof(struct systemcfg, tb_to_xs));
	DEFINE(CFG_STAMP_XSEC, offsetof(struct systemcfg, stamp_xsec));
	DEFINE(CFG_TB_UPDATE_COUNT, offsetof(struct systemcfg, tb_update_count));
	DEFINE(CFG_TZ_MINUTEWEST, offsetof(struct systemcfg, tz_minuteswest));
	DEFINE(CFG_TZ_DSTTIME, offsetof(struct systemcfg, tz_dsttime));
	DEFINE(CFG_SYSCALL_MAP32, offsetof(struct systemcfg, syscall_map_32));
	DEFINE(CFG_SYSCALL_MAP64, offsetof(struct systemcfg, syscall_map_64));

	/* timeval/timezone offsets for use by vdso */
	DEFINE(TVAL64_TV_SEC, offsetof(struct timeval, tv_sec));
	DEFINE(TVAL64_TV_USEC, offsetof(struct timeval, tv_usec));
	DEFINE(TVAL32_TV_SEC, offsetof(struct compat_timeval, tv_sec));
	DEFINE(TVAL32_TV_USEC, offsetof(struct compat_timeval, tv_usec));
	DEFINE(TZONE_TZ_MINWEST, offsetof(struct timezone, tz_minuteswest));
	DEFINE(TZONE_TZ_DSTTIME, offsetof(struct timezone, tz_dsttime));

	return 0;
}
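Editor's note on the DEFINE() trick used throughout the deleted asm-offsets.c above: the file is never linked into the kernel; it is only compiled to assembly, and the build then scans the generated .s output for the "->" markers to turn each symbol/value pair into a #define that assembly code can include. A minimal stand-alone sketch of the same idea; the structure and symbol names below are invented for illustration, and the post-processing step the kernel build uses on the .s file is not reproduced here:

	/* Compile with "gcc -S offsets_demo.c" and look for lines containing "->"
	 * in offsets_demo.s; the constant offsets appear there as text. */
	#include <stddef.h>

	struct demo_regs {		/* made-up structure, not a kernel type */
		unsigned long nip;
		unsigned long msr;
		unsigned long gpr[32];
	};

	#define DEFINE(sym, val) \
		asm volatile("\n->" #sym " %0 " #val : : "i" (val))

	int main(void)
	{
		DEFINE(DEMO_NIP, offsetof(struct demo_regs, nip));
		DEFINE(DEMO_MSR, offsetof(struct demo_regs, msr));
		DEFINE(DEMO_GPR0, offsetof(struct demo_regs, gpr[0]));
		return 0;
	}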
arch/ppc64/kernel/btext.c
deleted 100644 → 0
View file @ 302fe175
/*
* Procedures for drawing on the screen early on in the boot process.
*
* Benjamin Herrenschmidt <benh@kernel.crashing.org>
*/
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <asm/sections.h>
#include <asm/prom.h>
#include <asm/btext.h>
#include <asm/prom.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/lmb.h>
#include <asm/processor.h>
#include <asm/udbg.h>
#undef NO_SCROLL
#ifndef NO_SCROLL
static void scrollscreen(void);
#endif

static void draw_byte(unsigned char c, long locX, long locY);
static void draw_byte_32(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_16(unsigned char *bits, unsigned int *base, int rb);
static void draw_byte_8(unsigned char *bits, unsigned int *base, int rb);

static int g_loc_X;
static int g_loc_Y;
static int g_max_loc_X;
static int g_max_loc_Y;

static int dispDeviceRowBytes;
static int dispDeviceDepth;
static int dispDeviceRect[4];
static unsigned char *dispDeviceBase, *logicalDisplayBase;

unsigned long disp_BAT[2] __initdata = {0, 0};

#define cmapsz	(16*256)

static unsigned char vga_font[cmapsz];

int boot_text_mapped;
int force_printk_to_btext = 0;
/* Here's a small text engine to use during early boot
* or for debugging purposes
*
* todo:
*
* - build some kind of vgacon with it to enable early printk
* - move to a separate file
* - add a few video driver hooks to keep in sync with display
* changes.
*/
void map_boot_text(void)
{
	unsigned long base, offset, size;
	unsigned char *vbase;

	/* By default, we are no longer mapped */
	boot_text_mapped = 0;
	if (dispDeviceBase == 0)
		return;
	base = ((unsigned long) dispDeviceBase) & 0xFFFFF000UL;
	offset = ((unsigned long) dispDeviceBase) - base;
	size = dispDeviceRowBytes * dispDeviceRect[3] + offset
		+ dispDeviceRect[0];
	vbase = __ioremap(base, size, _PAGE_NO_CACHE);
	if (vbase == 0)
		return;
	logicalDisplayBase = vbase + offset;
	boot_text_mapped = 1;
}

int btext_initialize(struct device_node *np)
{
	unsigned int width, height, depth, pitch;
	unsigned long address = 0;
	u32 *prop;

	prop = (u32 *)get_property(np, "width", NULL);
	if (prop == NULL)
		return -EINVAL;
	width = *prop;
	prop = (u32 *)get_property(np, "height", NULL);
	if (prop == NULL)
		return -EINVAL;
	height = *prop;
	prop = (u32 *)get_property(np, "depth", NULL);
	if (prop == NULL)
		return -EINVAL;
	depth = *prop;
	pitch = width * ((depth + 7) / 8);
	prop = (u32 *)get_property(np, "linebytes", NULL);
	if (prop)
		pitch = *prop;
	if (pitch == 1)
		pitch = 0x1000;
	prop = (u32 *)get_property(np, "address", NULL);
	if (prop)
		address = *prop;

	/* FIXME: Add support for PCI reg properties */

	if (address == 0)
		return -EINVAL;

	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
	logicalDisplayBase = (unsigned char *)address;
	dispDeviceBase = (unsigned char *)address;
	dispDeviceRowBytes = pitch;
	dispDeviceDepth = depth;
	dispDeviceRect[0] = dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;

	map_boot_text();

	return 0;
}
static void btext_putc(unsigned char c)
{
	btext_drawchar(c);
}

void __init init_boot_display(void)
{
	char *name;
	struct device_node *np = NULL;
	int rc = -ENODEV;

	printk("trying to initialize btext ...\n");

	name = (char *)get_property(of_chosen, "linux,stdout-path", NULL);
	if (name != NULL) {
		np = of_find_node_by_path(name);
		if (np != NULL) {
			if (strcmp(np->type, "display") != 0) {
				printk("boot stdout isn't a display !\n");
				of_node_put(np);
				np = NULL;
			}
		}
	}
	if (np)
		rc = btext_initialize(np);
	if (rc) {
		for (np = NULL; (np = of_find_node_by_type(np, "display"));) {
			if (get_property(np, "linux,opened", NULL)) {
				printk("trying %s ...\n", np->full_name);
				rc = btext_initialize(np);
				printk("result: %d\n", rc);
			}
			if (rc == 0)
				break;
		}
	}
	if (rc == 0 && udbg_putc == NULL)
		udbg_putc = btext_putc;
}
/* Calc the base address of a given point (x,y) */
static unsigned char *calc_base(int x, int y)
{
	unsigned char *base;

	base = logicalDisplayBase;
	if (base == 0)
		base = dispDeviceBase;
	base += (x + dispDeviceRect[0]) * (dispDeviceDepth >> 3);
	base += (y + dispDeviceRect[1]) * dispDeviceRowBytes;
	return base;
}

/* Adjust the display to a new resolution */
void btext_update_display(unsigned long phys, int width, int height,
			  int depth, int pitch)
{
	if (dispDeviceBase == 0)
		return;
	/* check it's the same frame buffer (within 256MB) */
	if ((phys ^ (unsigned long)dispDeviceBase) & 0xf0000000)
		return;

	dispDeviceBase = (__u8 *) phys;
	dispDeviceRect[0] = 0;
	dispDeviceRect[1] = 0;
	dispDeviceRect[2] = width;
	dispDeviceRect[3] = height;
	dispDeviceDepth = depth;
	dispDeviceRowBytes = pitch;
	if (boot_text_mapped) {
		iounmap(logicalDisplayBase);
		boot_text_mapped = 0;
	}
	map_boot_text();
	g_loc_X = 0;
	g_loc_Y = 0;
	g_max_loc_X = width / 8;
	g_max_loc_Y = height / 16;
}

void btext_clearscreen(void)
{
	unsigned long *base = (unsigned long *)calc_base(0, 0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 3;
	int i, j;

	for (i = 0; i < (dispDeviceRect[3] - dispDeviceRect[1]); i++) {
		unsigned long *ptr = base;
		for (j = width; j; --j)
			*(ptr++) = 0;
		base += (dispDeviceRowBytes >> 3);
	}
}
#ifndef NO_SCROLL
static void scrollscreen(void)
{
	unsigned long *src = (unsigned long *)calc_base(0, 16);
	unsigned long *dst = (unsigned long *)calc_base(0, 0);
	unsigned long width = ((dispDeviceRect[2] - dispDeviceRect[0]) *
				(dispDeviceDepth >> 3)) >> 3;
	int i, j;

	for (i = 0; i < (dispDeviceRect[3] - dispDeviceRect[1] - 16); i++) {
		unsigned long *src_ptr = src;
		unsigned long *dst_ptr = dst;
		for (j = width; j; --j)
			*(dst_ptr++) = *(src_ptr++);
		src += (dispDeviceRowBytes >> 3);
		dst += (dispDeviceRowBytes >> 3);
	}
	for (i = 0; i < 16; i++) {
		unsigned long *dst_ptr = dst;
		for (j = width; j; --j)
			*(dst_ptr++) = 0;
		dst += (dispDeviceRowBytes >> 3);
	}
}
#endif /* ndef NO_SCROLL */

void btext_drawchar(char c)
{
	int cline = 0;
#ifdef NO_SCROLL
	int x;
#endif
	if (!boot_text_mapped)
		return;

	switch (c) {
	case '\b':
		if (g_loc_X > 0)
			--g_loc_X;
		break;
	case '\t':
		g_loc_X = (g_loc_X & -8) + 8;
		break;
	case '\r':
		g_loc_X = 0;
		break;
	case '\n':
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
		break;
	default:
		draw_byte(c, g_loc_X++, g_loc_Y);
	}
	if (g_loc_X >= g_max_loc_X) {
		g_loc_X = 0;
		g_loc_Y++;
		cline = 1;
	}
#ifndef NO_SCROLL
	while (g_loc_Y >= g_max_loc_Y) {
		scrollscreen();
		g_loc_Y--;
	}
#else
	/* wrap around from bottom to top of screen so we don't
	   waste time scrolling each line.  -- paulus. */
	if (g_loc_Y >= g_max_loc_Y)
		g_loc_Y = 0;
	if (cline) {
		for (x = 0; x < g_max_loc_X; ++x)
			draw_byte(' ', x, g_loc_Y);
	}
#endif
}
void btext_drawstring(const char *c)
{
	if (!boot_text_mapped)
		return;
	while (*c)
		btext_drawchar(*c++);
}

void btext_drawhex(unsigned long v)
{
	char *hex_table = "0123456789abcdef";

	if (!boot_text_mapped)
		return;
	btext_drawchar(hex_table[(v >> 60) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 56) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 52) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 48) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 44) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 40) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 36) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 32) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 28) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 24) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 20) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 16) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 12) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 8) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 4) & 0x0000000FUL]);
	btext_drawchar(hex_table[(v >> 0) & 0x0000000FUL]);
	btext_drawchar(' ');
}
static void draw_byte(unsigned char c, long locX, long locY)
{
	unsigned char *base	= calc_base(locX << 3, locY << 4);
	unsigned char *font	= &vga_font[((unsigned int)c) * 16];
	int rb			= dispDeviceRowBytes;

	switch (dispDeviceDepth) {
	case 24:
	case 32:
		draw_byte_32(font, (unsigned int *)base, rb);
		break;
	case 15:
	case 16:
		draw_byte_16(font, (unsigned int *)base, rb);
		break;
	case 8:
		draw_byte_8(font, (unsigned int *)base, rb);
		break;
	}
}

static unsigned int expand_bits_8[16] = {
	0x00000000, 0x000000ff, 0x0000ff00, 0x0000ffff,
	0x00ff0000, 0x00ff00ff, 0x00ffff00, 0x00ffffff,
	0xff000000, 0xff0000ff, 0xff00ff00, 0xff00ffff,
	0xffff0000, 0xffff00ff, 0xffffff00, 0xffffffff
};

static unsigned int expand_bits_16[4] = {
	0x00000000, 0x0000ffff, 0xffff0000, 0xffffffff
};

static void draw_byte_32(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (-(bits >> 7) & fg) ^ bg;
		base[1] = (-((bits >> 6) & 1) & fg) ^ bg;
		base[2] = (-((bits >> 5) & 1) & fg) ^ bg;
		base[3] = (-((bits >> 4) & 1) & fg) ^ bg;
		base[4] = (-((bits >> 3) & 1) & fg) ^ bg;
		base[5] = (-((bits >> 2) & 1) & fg) ^ bg;
		base[6] = (-((bits >> 1) & 1) & fg) ^ bg;
		base[7] = (-(bits & 1) & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

static void draw_byte_16(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0xFFFFFFFFUL;
	int bg = 0x00000000UL;
	unsigned int *eb = (int *)expand_bits_16;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (eb[bits >> 6] & fg) ^ bg;
		base[1] = (eb[(bits >> 4) & 3] & fg) ^ bg;
		base[2] = (eb[(bits >> 2) & 3] & fg) ^ bg;
		base[3] = (eb[bits & 3] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

static void draw_byte_8(unsigned char *font, unsigned int *base, int rb)
{
	int l, bits;
	int fg = 0x0F0F0F0FUL;
	int bg = 0x00000000UL;
	unsigned int *eb = (int *)expand_bits_8;

	for (l = 0; l < 16; ++l)
	{
		bits = *font++;
		base[0] = (eb[bits >> 4] & fg) ^ bg;
		base[1] = (eb[bits & 0xf] & fg) ^ bg;
		base = (unsigned int *) ((char *)base + rb);
	}
}

static unsigned char vga_font[cmapsz] = {
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7e
,
0x81
,
0xa5
,
0x81
,
0x81
,
0xbd
,
0x99
,
0x81
,
0x81
,
0x7e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7e
,
0xff
,
0xdb
,
0xff
,
0xff
,
0xc3
,
0xe7
,
0xff
,
0xff
,
0x7e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x6c
,
0xfe
,
0xfe
,
0xfe
,
0xfe
,
0x7c
,
0x38
,
0x10
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x38
,
0x7c
,
0xfe
,
0x7c
,
0x38
,
0x10
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x3c
,
0x3c
,
0xe7
,
0xe7
,
0xe7
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x3c
,
0x7e
,
0xff
,
0xff
,
0x7e
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x3c
,
0x3c
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xff
,
0xff
,
0xff
,
0xff
,
0xff
,
0xff
,
0xe7
,
0xc3
,
0xc3
,
0xe7
,
0xff
,
0xff
,
0xff
,
0xff
,
0xff
,
0xff
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x66
,
0x42
,
0x42
,
0x66
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xff
,
0xff
,
0xff
,
0xff
,
0xff
,
0xc3
,
0x99
,
0xbd
,
0xbd
,
0x99
,
0xc3
,
0xff
,
0xff
,
0xff
,
0xff
,
0xff
,
0x00
,
0x00
,
0x1e
,
0x0e
,
0x1a
,
0x32
,
0x78
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x78
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x66
,
0x66
,
0x66
,
0x66
,
0x3c
,
0x18
,
0x7e
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3f
,
0x33
,
0x3f
,
0x30
,
0x30
,
0x30
,
0x30
,
0x70
,
0xf0
,
0xe0
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7f
,
0x63
,
0x7f
,
0x63
,
0x63
,
0x63
,
0x63
,
0x67
,
0xe7
,
0xe6
,
0xc0
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0xdb
,
0x3c
,
0xe7
,
0x3c
,
0xdb
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x80
,
0xc0
,
0xe0
,
0xf0
,
0xf8
,
0xfe
,
0xf8
,
0xf0
,
0xe0
,
0xc0
,
0x80
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x02
,
0x06
,
0x0e
,
0x1e
,
0x3e
,
0xfe
,
0x3e
,
0x1e
,
0x0e
,
0x06
,
0x02
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x3c
,
0x7e
,
0x18
,
0x18
,
0x18
,
0x7e
,
0x3c
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x00
,
0x66
,
0x66
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7f
,
0xdb
,
0xdb
,
0xdb
,
0x7b
,
0x1b
,
0x1b
,
0x1b
,
0x1b
,
0x1b
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0x60
,
0x38
,
0x6c
,
0xc6
,
0xc6
,
0x6c
,
0x38
,
0x0c
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfe
,
0xfe
,
0xfe
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x3c
,
0x7e
,
0x18
,
0x18
,
0x18
,
0x7e
,
0x3c
,
0x18
,
0x7e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x3c
,
0x7e
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x7e
,
0x3c
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x0c
,
0xfe
,
0x0c
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x30
,
0x60
,
0xfe
,
0x60
,
0x30
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc0
,
0xc0
,
0xc0
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x24
,
0x66
,
0xff
,
0x66
,
0x24
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x38
,
0x38
,
0x7c
,
0x7c
,
0xfe
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfe
,
0xfe
,
0x7c
,
0x7c
,
0x38
,
0x38
,
0x10
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x3c
,
0x3c
,
0x3c
,
0x18
,
0x18
,
0x18
,
0x00
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x66
,
0x66
,
0x66
,
0x24
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x6c
,
0x6c
,
0xfe
,
0x6c
,
0x6c
,
0x6c
,
0xfe
,
0x6c
,
0x6c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x7c
,
0xc6
,
0xc2
,
0xc0
,
0x7c
,
0x06
,
0x06
,
0x86
,
0xc6
,
0x7c
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc2
,
0xc6
,
0x0c
,
0x18
,
0x30
,
0x60
,
0xc6
,
0x86
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x38
,
0x6c
,
0x6c
,
0x38
,
0x76
,
0xdc
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x30
,
0x30
,
0x30
,
0x60
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x0c
,
0x18
,
0x30
,
0x30
,
0x30
,
0x30
,
0x30
,
0x30
,
0x18
,
0x0c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x30
,
0x18
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x18
,
0x30
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x66
,
0x3c
,
0xff
,
0x3c
,
0x66
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x7e
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x18
,
0x30
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x02
,
0x06
,
0x0c
,
0x18
,
0x30
,
0x60
,
0xc0
,
0x80
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xce
,
0xde
,
0xf6
,
0xe6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x38
,
0x78
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x7e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0x06
,
0x0c
,
0x18
,
0x30
,
0x60
,
0xc0
,
0xc6
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0x06
,
0x06
,
0x3c
,
0x06
,
0x06
,
0x06
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x0c
,
0x1c
,
0x3c
,
0x6c
,
0xcc
,
0xfe
,
0x0c
,
0x0c
,
0x0c
,
0x1e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfe
,
0xc0
,
0xc0
,
0xc0
,
0xfc
,
0x06
,
0x06
,
0x06
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x38
,
0x60
,
0xc0
,
0xc0
,
0xfc
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfe
,
0xc6
,
0x06
,
0x06
,
0x0c
,
0x18
,
0x30
,
0x30
,
0x30
,
0x30
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0x7e
,
0x06
,
0x06
,
0x06
,
0x0c
,
0x78
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x30
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x06
,
0x0c
,
0x18
,
0x30
,
0x60
,
0x30
,
0x18
,
0x0c
,
0x06
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7e
,
0x00
,
0x00
,
0x7e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x60
,
0x30
,
0x18
,
0x0c
,
0x06
,
0x0c
,
0x18
,
0x30
,
0x60
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0x0c
,
0x18
,
0x18
,
0x18
,
0x00
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xde
,
0xde
,
0xde
,
0xdc
,
0xc0
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x38
,
0x6c
,
0xc6
,
0xc6
,
0xfe
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfc
,
0x66
,
0x66
,
0x66
,
0x7c
,
0x66
,
0x66
,
0x66
,
0x66
,
0xfc
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x66
,
0xc2
,
0xc0
,
0xc0
,
0xc0
,
0xc0
,
0xc2
,
0x66
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xf8
,
0x6c
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x6c
,
0xf8
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfe
,
0x66
,
0x62
,
0x68
,
0x78
,
0x68
,
0x60
,
0x62
,
0x66
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfe
,
0x66
,
0x62
,
0x68
,
0x78
,
0x68
,
0x60
,
0x60
,
0x60
,
0xf0
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x66
,
0xc2
,
0xc0
,
0xc0
,
0xde
,
0xc6
,
0xc6
,
0x66
,
0x3a
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xfe
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x1e
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0xcc
,
0xcc
,
0xcc
,
0x78
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xe6
,
0x66
,
0x66
,
0x6c
,
0x78
,
0x78
,
0x6c
,
0x66
,
0x66
,
0xe6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xf0
,
0x60
,
0x60
,
0x60
,
0x60
,
0x60
,
0x60
,
0x62
,
0x66
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0xe7
,
0xff
,
0xff
,
0xdb
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0xe6
,
0xf6
,
0xfe
,
0xde
,
0xce
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfc
,
0x66
,
0x66
,
0x66
,
0x7c
,
0x60
,
0x60
,
0x60
,
0x60
,
0xf0
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xd6
,
0xde
,
0x7c
,
0x0c
,
0x0e
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfc
,
0x66
,
0x66
,
0x66
,
0x7c
,
0x6c
,
0x66
,
0x66
,
0x66
,
0xe6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0x60
,
0x38
,
0x0c
,
0x06
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xff
,
0xdb
,
0x99
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0x66
,
0x3c
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0xdb
,
0xdb
,
0xff
,
0x66
,
0x66
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0xc3
,
0x66
,
0x3c
,
0x18
,
0x18
,
0x3c
,
0x66
,
0xc3
,
0xc3
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0xc3
,
0xc3
,
0x66
,
0x3c
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xff
,
0xc3
,
0x86
,
0x0c
,
0x18
,
0x30
,
0x60
,
0xc1
,
0xc3
,
0xff
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x30
,
0x30
,
0x30
,
0x30
,
0x30
,
0x30
,
0x30
,
0x30
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x80
,
0xc0
,
0xe0
,
0x70
,
0x38
,
0x1c
,
0x0e
,
0x06
,
0x02
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x0c
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x38
,
0x6c
,
0xc6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xff
,
0x00
,
0x00
,
0x30
,
0x30
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x78
,
0x0c
,
0x7c
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xe0
,
0x60
,
0x60
,
0x78
,
0x6c
,
0x66
,
0x66
,
0x66
,
0x66
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc0
,
0xc0
,
0xc0
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x1c
,
0x0c
,
0x0c
,
0x3c
,
0x6c
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xfe
,
0xc0
,
0xc0
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x38
,
0x6c
,
0x64
,
0x60
,
0xf0
,
0x60
,
0x60
,
0x60
,
0x60
,
0xf0
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x76
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x7c
,
0x0c
,
0xcc
,
0x78
,
0x00
,
0x00
,
0x00
,
0xe0
,
0x60
,
0x60
,
0x6c
,
0x76
,
0x66
,
0x66
,
0x66
,
0x66
,
0xe6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x00
,
0x38
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x06
,
0x06
,
0x00
,
0x0e
,
0x06
,
0x06
,
0x06
,
0x06
,
0x06
,
0x06
,
0x66
,
0x66
,
0x3c
,
0x00
,
0x00
,
0x00
,
0xe0
,
0x60
,
0x60
,
0x66
,
0x6c
,
0x78
,
0x78
,
0x6c
,
0x66
,
0xe6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x38
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xe6
,
0xff
,
0xdb
,
0xdb
,
0xdb
,
0xdb
,
0xdb
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xdc
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xdc
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x7c
,
0x60
,
0x60
,
0xf0
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x76
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x7c
,
0x0c
,
0x0c
,
0x1e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xdc
,
0x76
,
0x66
,
0x60
,
0x60
,
0x60
,
0xf0
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0x60
,
0x38
,
0x0c
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x30
,
0x30
,
0xfc
,
0x30
,
0x30
,
0x30
,
0x30
,
0x36
,
0x1c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0xc3
,
0xc3
,
0xc3
,
0x66
,
0x3c
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0xc3
,
0xc3
,
0xdb
,
0xdb
,
0xff
,
0x66
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0x66
,
0x3c
,
0x18
,
0x3c
,
0x66
,
0xc3
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7e
,
0x06
,
0x0c
,
0xf8
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfe
,
0xcc
,
0x18
,
0x30
,
0x60
,
0xc6
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x0e
,
0x18
,
0x18
,
0x18
,
0x70
,
0x18
,
0x18
,
0x18
,
0x18
,
0x0e
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x18
,
0x18
,
0x00
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x70
,
0x18
,
0x18
,
0x18
,
0x0e
,
0x18
,
0x18
,
0x18
,
0x18
,
0x70
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x76
,
0xdc
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x38
,
0x6c
,
0xc6
,
0xc6
,
0xc6
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x66
,
0xc2
,
0xc0
,
0xc0
,
0xc0
,
0xc2
,
0x66
,
0x3c
,
0x0c
,
0x06
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0xcc
,
0x00
,
0x00
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x0c
,
0x18
,
0x30
,
0x00
,
0x7c
,
0xc6
,
0xfe
,
0xc0
,
0xc0
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x38
,
0x6c
,
0x00
,
0x78
,
0x0c
,
0x7c
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xcc
,
0x00
,
0x00
,
0x78
,
0x0c
,
0x7c
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x60
,
0x30
,
0x18
,
0x00
,
0x78
,
0x0c
,
0x7c
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x38
,
0x6c
,
0x38
,
0x00
,
0x78
,
0x0c
,
0x7c
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3c
,
0x66
,
0x60
,
0x60
,
0x66
,
0x3c
,
0x0c
,
0x06
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x38
,
0x6c
,
0x00
,
0x7c
,
0xc6
,
0xfe
,
0xc0
,
0xc0
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xfe
,
0xc0
,
0xc0
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x60
,
0x30
,
0x18
,
0x00
,
0x7c
,
0xc6
,
0xfe
,
0xc0
,
0xc0
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x66
,
0x00
,
0x00
,
0x38
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x3c
,
0x66
,
0x00
,
0x38
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x60
,
0x30
,
0x18
,
0x00
,
0x38
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0x00
,
0x10
,
0x38
,
0x6c
,
0xc6
,
0xc6
,
0xfe
,
0xc6
,
0xc6
,
0xc6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x38
,
0x6c
,
0x38
,
0x00
,
0x38
,
0x6c
,
0xc6
,
0xc6
,
0xfe
,
0xc6
,
0xc6
,
0xc6
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x30
,
0x60
,
0x00
,
0xfe
,
0x66
,
0x60
,
0x7c
,
0x60
,
0x60
,
0x66
,
0xfe
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x6e
,
0x3b
,
0x1b
,
0x7e
,
0xd8
,
0xdc
,
0x77
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x3e
,
0x6c
,
0xcc
,
0xcc
,
0xfe
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xce
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x10
,
0x38
,
0x6c
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0x00
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x60
,
0x30
,
0x18
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x30
,
0x78
,
0xcc
,
0x00
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x60
,
0x30
,
0x18
,
0x00
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0x00
,
0x00
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7e
,
0x06
,
0x0c
,
0x78
,
0x00
,
0x00
,
0xc6
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc6
,
0x00
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x18
,
0x7e
,
0xc3
,
0xc0
,
0xc0
,
0xc0
,
0xc3
,
0x7e
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x38
,
0x6c
,
0x64
,
0x60
,
0xf0
,
0x60
,
0x60
,
0x60
,
0x60
,
0xe6
,
0xfc
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xc3
,
0x66
,
0x3c
,
0x18
,
0xff
,
0x18
,
0xff
,
0x18
,
0x18
,
0x18
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xfc
,
0x66
,
0x66
,
0x7c
,
0x62
,
0x66
,
0x6f
,
0x66
,
0x66
,
0x66
,
0xf3
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x0e
,
0x1b
,
0x18
,
0x18
,
0x18
,
0x7e
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0xd8
,
0x70
,
0x00
,
0x00
,
0x00
,
0x18
,
0x30
,
0x60
,
0x00
,
0x78
,
0x0c
,
0x7c
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x0c
,
0x18
,
0x30
,
0x00
,
0x38
,
0x18
,
0x18
,
0x18
,
0x18
,
0x18
,
0x3c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x30
,
0x60
,
0x00
,
0x7c
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0xc6
,
0x7c
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x18
,
0x30
,
0x60
,
0x00
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0xcc
,
0x76
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0x76
,
0xdc
,
0x00
,
0xdc
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x66
,
0x00
,
0x00
,
0x00
,
0x00
,
0x76
,
0xdc
,
0x00
,
0xc6
,
0xe6
,
0xf6
,
0xfe
,
0xde
,
0xce
,
	0xc6, 0xc6, 0xc6, 0x00, 0x00, 0x00, 0x00, 0x00,
	/* ... remaining glyph bitmap rows of the embedded console font table ... */
};
arch/ppc64/kernel/head.S
deleted
100644 → 0
/*
 *  arch/ppc64/kernel/head.S
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *
 *  Adapted for 64 bit PowerPC by Dave Engebretsen, Peter Bergner, and
 *    Mike Corrigan {engebret|bergner|mikejc}@us.ibm.com
 *
 *  This file contains the low-level support and setup for the
 *  PowerPC-64 platform, including trap and interrupt dispatch.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */
#include <linux/config.h>
#include <linux/threads.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/bug.h>
#include <asm/cputable.h>
#include <asm/setup.h>
#include <asm/hvcall.h>
#include <asm/iseries/lpar_map.h>
#include <asm/thread_info.h>
#ifdef CONFIG_PPC_ISERIES
#define DO_SOFT_DISABLE
#endif
/*
 * We layout physical memory as follows:
 * 0x0000 - 0x00ff : Secondary processor spin code
 * 0x0100 - 0x2fff : pSeries Interrupt prologs
 * 0x3000 - 0x5fff : interrupt support, iSeries and common interrupt prologs
 * 0x6000 - 0x6fff : Initial (CPU0) segment table
 * 0x7000 - 0x7fff : FWNMI data area
 * 0x8000 -        : Early init and support code
 */
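For reference, the same low-memory map written out as C constants (the macro names
below are purely illustrative; the assembly in this file works with the literal
addresses and ". = <addr>" directives):

/* Low physical memory map described by the comment above (hypothetical names). */
#define SECONDARY_SPIN_BASE	0x0000	/* 0x0000 - 0x00ff : secondary processor spin code        */
#define PSERIES_PROLOG_BASE	0x0100	/* 0x0100 - 0x2fff : pSeries interrupt prologs            */
#define COMMON_PROLOG_BASE	0x3000	/* 0x3000 - 0x5fff : iSeries and common interrupt prologs */
#define INITIAL_STAB_BASE	0x6000	/* 0x6000 - 0x6fff : initial (CPU0) segment table         */
#define FWNMI_DATA_BASE		0x7000	/* 0x7000 - 0x7fff : FWNMI data area                      */
#define EARLY_INIT_BASE		0x8000	/* 0x8000 -        : early init and support code          */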
/*
 *   SPRG Usage
 *
 *   Register	Definition
 *
 *   SPRG0	reserved for hypervisor
 *   SPRG1	temp - used to save gpr
 *   SPRG2	temp - used to save gpr
 *   SPRG3	virt addr of paca
 */
/*
 * Entering into this code we make the following assumptions:
 *  For pSeries:
 *   1. The MMU is off & open firmware is running in real mode.
 *   2. The kernel is entered at __start
 *
 *  For iSeries:
 *   1. The MMU is on (as it always is for iSeries)
 *   2. The kernel is entered at system_reset_iSeries
 */
	.text
	.globl	_stext
_stext:
#ifdef CONFIG_PPC_MULTIPLATFORM
_GLOBAL(__start)
	/* NOP this out unconditionally */
BEGIN_FTR_SECTION
	b	.__start_initialization_multiplatform
END_FTR_SECTION(0, 1)
#endif /* CONFIG_PPC_MULTIPLATFORM */

	/* Catch branch to 0 in real mode */
	trap
#ifdef CONFIG_PPC_ISERIES
	/*
	 * At offset 0x20, there is a pointer to iSeries LPAR data.
	 * This is required by the hypervisor
	 */
	. = 0x20
	.llong hvReleaseData-KERNELBASE

	/*
	 * At offset 0x28 and 0x30 are offsets to the mschunks_map
	 * array (used by the iSeries LPAR debugger to do translation
	 * between physical addresses and absolute addresses) and
	 * to the pidhash table (also used by the debugger)
	 */
	.llong mschunks_map-KERNELBASE
	.llong 0	/* pidhash - KERNELBASE SFRXXX */

	/* Offset 0x38 - Pointer to start of embedded System.map */
	.globl	embedded_sysmap_start
embedded_sysmap_start:
	.llong	0

	/* Offset 0x40 - Pointer to end of embedded System.map */
	.globl	embedded_sysmap_end
embedded_sysmap_end:
	.llong	0

#endif /* CONFIG_PPC_ISERIES */
	/* Secondary processors spin on this value until it goes to 1. */
	.globl	__secondary_hold_spinloop
__secondary_hold_spinloop:
	.llong	0x0

	/* Secondary processors write this value with their cpu # */
	/* after they enter the spin loop immediately below.      */
	.globl	__secondary_hold_acknowledge
__secondary_hold_acknowledge:
	.llong	0x0

	. = 0x60
/*
 * The following code is used on pSeries to hold secondary processors
 * in a spin loop after they have been freed from OpenFirmware, but
 * before the bulk of the kernel has been relocated.  This code
 * is relocated to physical address 0x60 before prom_init is run.
 * All of it must fit below the first exception vector at 0x100.
 */
_GLOBAL(__secondary_hold)
	mfmsr	r24
	ori	r24,r24,MSR_RI
	mtmsrd	r24			/* RI on */

	/* Grab our linux cpu number */
	mr	r24,r3

	/* Tell the master cpu we're here */
	/* Relocation is off & we are located at an address less */
	/* than 0x100, so only need to grab low order offset.    */
	std	r24,__secondary_hold_acknowledge@l(0)
	sync

	/* All secondary cpus wait here until told to start. */
100:	ld	r4,__secondary_hold_spinloop@l(0)
	cmpdi	0,r4,1
	bne	100b

#ifdef CONFIG_HMT
	b	.hmt_init
#else
#ifdef CONFIG_SMP
	mr	r3,r24
	b	.pSeries_secondary_smp_init
#else
	BUG_OPCODE
#endif
#endif
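The hold/release protocol above is small enough to model in plain C: each secondary
writes its CPU number to __secondary_hold_acknowledge and then spins until the master
sets __secondary_hold_spinloop to 1. A rough user-space sketch of that handshake
(illustrative only; the real code runs with the MMU off and polls two .llong cells,
not C variables):

#include <pthread.h>
#include <stdio.h>

/* Illustrative stand-ins for the two cells polled by the real code. */
static volatile long secondary_hold_spinloop;	/* master sets to 1 to release  */
static volatile long secondary_hold_ack;	/* secondary writes its cpu #   */

static void *secondary(void *arg)
{
	long cpu = (long)arg;

	secondary_hold_ack = cpu;		/* "tell the master cpu we're here" */
	while (secondary_hold_spinloop != 1)	/* spin until told to start */
		;
	printf("cpu %ld released\n", cpu);
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, secondary, (void *)1);
	while (secondary_hold_ack == 0)		/* master waits for the acknowledge */
		;
	secondary_hold_spinloop = 1;		/* release the secondary */
	pthread_join(t, NULL);
	return 0;
}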
/* This value is used to mark exception frames on the stack. */
	.section ".toc","aw"
exception_marker:
	.tc	ID_72656773_68657265[TC],0x7265677368657265
	.text
/*
 * The following macros define the code that appears as
 * the prologue to each of the exception handlers.  They
 * are split into two parts to allow a single kernel binary
 * to be used for pSeries and iSeries.
 * LOL.  One day... - paulus
 */

/*
 * We make as much of the exception code common between native
 * exception handlers (including pSeries LPAR) and iSeries LPAR
 * implementations as possible.
 */

/*
 * This is the start of the interrupt handlers for pSeries
 * This code runs with relocation off.
 */
#define EX_R9 0
#define EX_R10 8
#define EX_R11 16
#define EX_R12 24
#define EX_R13 32
#define EX_SRR0 40
#define EX_DAR 48
#define EX_DSISR 56
#define EX_CCR 60
#define EX_R3 64
#define EX_LR 72
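Read as a C layout, the EX_* offsets above describe one 80-byte per-exception
scratch save area inside the paca.  A minimal sketch of an equivalent structure
(the struct and field names here are hypothetical; the kernel itself only ever
uses the raw offsets):

#include <stddef.h>
#include <stdint.h>
#include <assert.h>

/* Hypothetical mirror of the EX_* save area layout. */
struct ex_save_area {
	uint64_t r9;	/* EX_R9    =  0 */
	uint64_t r10;	/* EX_R10   =  8 */
	uint64_t r11;	/* EX_R11   = 16 */
	uint64_t r12;	/* EX_R12   = 24 */
	uint64_t r13;	/* EX_R13   = 32 */
	uint64_t srr0;	/* EX_SRR0  = 40 */
	uint64_t dar;	/* EX_DAR   = 48 */
	uint32_t dsisr;	/* EX_DSISR = 56 */
	uint32_t ccr;	/* EX_CCR   = 60 */
	uint64_t r3;	/* EX_R3    = 64 */
	uint64_t lr;	/* EX_LR    = 72 */
};

int main(void)
{
	/* The field offsets line up with the assembler constants above. */
	assert(offsetof(struct ex_save_area, r13)   == 32);
	assert(offsetof(struct ex_save_area, dsisr) == 56);
	assert(offsetof(struct ex_save_area, ccr)   == 60);
	assert(offsetof(struct ex_save_area, lr)    == 72);
	return 0;
}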
#define EXCEPTION_PROLOG_PSERIES(area, label) \
mfspr
r13
,
SPRN_SPRG3
; /* get paca address into r13 */ \
std
r9
,
area
+
EX_R9
(
r13
)
; /* save r9 - r12 */ \
std
r10
,
area
+
EX_R10
(
r13
)
; \
std
r11
,
area
+
EX_R11
(
r13
)
; \
std
r12
,
area
+
EX_R12
(
r13
)
; \
mfspr
r9
,
SPRN_SPRG1
; \
std
r9
,
area
+
EX_R13
(
r13
)
; \
mfcr
r9
; \
clrrdi
r12
,
r13
,
32
; /* get high part of &label */ \
mfmsr
r10
; \
mfspr
r11
,
SPRN_SRR0
; /* save SRR0 */ \
ori
r12
,
r12
,(
label
)
@
l
; /* virt addr of handler */ \
ori
r10
,
r10
,
MSR_IR|MSR_DR|MSR_RI
; \
mtspr
SPRN_SRR0
,
r12
; \
mfspr
r12
,
SPRN_SRR1
; /* and SRR1 */ \
mtspr
SPRN_SRR1
,
r10
; \
rfid
; \
b
.
/*
prevent
speculative
execution
*/
/*
 * This is the start of the interrupt handlers for iSeries
 * This code runs with relocation on.
 */
#define EXCEPTION_PROLOG_ISERIES_1(area) \
mfspr
r13
,
SPRN_SPRG3
; /* get paca address into r13 */ \
std
r9
,
area
+
EX_R9
(
r13
)
; /* save r9 - r12 */ \
std
r10
,
area
+
EX_R10
(
r13
)
; \
std
r11
,
area
+
EX_R11
(
r13
)
; \
std
r12
,
area
+
EX_R12
(
r13
)
; \
mfspr
r9
,
SPRN_SPRG1
; \
std
r9
,
area
+
EX_R13
(
r13
)
; \
mfcr
r9
#define EXCEPTION_PROLOG_ISERIES_2 \
mfmsr
r10
; \
ld
r11
,
PACALPPACA
+
LPPACASRR0
(
r13
)
; \
ld
r12
,
PACALPPACA
+
LPPACASRR1
(
r13
)
; \
ori
r10
,
r10
,
MSR_RI
; \
mtmsrd
r10
,
1
/*
 * The common exception prolog is used for all except a few exceptions
 * such as a segment miss on a kernel address.  We have to be prepared
 * to take another exception from the point where we first touch the
 * kernel stack onwards.
 *
 * On entry r13 points to the paca, r9-r13 are saved in the paca,
 * r9 contains the saved CR, r11 and r12 contain the saved SRR0 and
 * SRR1, and relocation is on.
 */
#define EXCEPTION_PROLOG_COMMON(n, area) \
andi
.
r10
,
r12
,
MSR_PR
; /* See if coming from user */ \
mr
r10
,
r1
; /* Save r1 */ \
subi
r1
,
r1
,
INT_FRAME_SIZE
; /* alloc frame on kernel stack */ \
beq
-
1
f
; \
ld
r1
,
PACAKSAVE
(
r13
)
; /* kernel stack to use */ \
1
:
cmpdi
cr1
,
r1
,
0
; /* check if r1 is in userspace */ \
bge
-
cr1
,
bad_stack
; /* abort if it is */ \
std
r9
,
_CCR
(
r1
)
; /* save CR in stackframe */ \
std
r11
,
_NIP
(
r1
)
; /* save SRR0 in stackframe */ \
std
r12
,
_MSR
(
r1
)
; /* save SRR1 in stackframe */ \
std
r10
,
0
(
r1
)
; /* make stack chain pointer */ \
std
r0
,
GPR0
(
r1
)
; /* save r0 in stackframe */ \
std
r10
,
GPR1
(
r1
)
; /* save r1 in stackframe */ \
std
r2
,
GPR2
(
r1
)
; /* save r2 in stackframe */ \
SAVE_4GPRS
(3,
r1
)
; /* save r3 - r6 in stackframe */ \
SAVE_2GPRS
(7,
r1
)
; /* save r7, r8 in stackframe */ \
ld
r9
,
area
+
EX_R9
(
r13
)
; /* move r9, r10 to stackframe */ \
ld
r10
,
area
+
EX_R10
(
r13
)
; \
std
r9
,
GPR9
(
r1
)
; \
std
r10
,
GPR10
(
r1
)
; \
ld
r9
,
area
+
EX_R11
(
r13
)
; /* move r11 - r13 to stackframe */ \
ld
r10
,
area
+
EX_R12
(
r13
)
; \
ld
r11
,
area
+
EX_R13
(
r13
)
; \
std
r9
,
GPR11
(
r1
)
; \
std
r10
,
GPR12
(
r1
)
; \
std
r11
,
GPR13
(
r1
)
; \
ld
r2
,
PACATOC
(
r13
)
; /* get kernel TOC into r2 */ \
mflr
r9
; /* save LR in stackframe */ \
std
r9
,
_LINK
(
r1
)
; \
mfctr
r10
; /* save CTR in stackframe */ \
std
r10
,
_CTR
(
r1
)
; \
mfspr
r11
,
SPRN_XER
; /* save XER in stackframe */ \
std
r11
,
_XER
(
r1
)
; \
li
r9
,(
n
)+
1
; \
std
r9
,
_TRAP
(
r1
)
; /* set trap number */ \
li
r10
,
0
; \
ld
r11
,
exception_marker
@
toc
(
r2
)
; \
std
r10
,
RESULT
(
r1
)
; /* clear regs->result */ \
std
r11
,
STACK_FRAME_OVERHEAD
-
16
(
r1
)
; /* mark the frame */
/*
*
Exception
vectors
.
*/
#define STD_EXCEPTION_PSERIES(n, label) \
.
=
n
; \
.
globl
label
##
_pSeries
; \
label
##
_pSeries
:
\
HMT_MEDIUM
; \
mtspr
SPRN_SPRG1
,
r13
; /* save r13 */ \
RUNLATCH_ON
(
r13
)
; \
EXCEPTION_PROLOG_PSERIES
(
PACA_EXGEN
,
label
##
_common
)
#define STD_EXCEPTION_ISERIES(n, label, area) \
.
globl
label
##
_iSeries
; \
label
##
_iSeries
:
\
HMT_MEDIUM
; \
mtspr
SPRN_SPRG1
,
r13
; /* save r13 */ \
RUNLATCH_ON
(
r13
)
; \
EXCEPTION_PROLOG_ISERIES_1
(
area
)
; \
EXCEPTION_PROLOG_ISERIES_2
; \
b
label
##
_common
#define MASKABLE_EXCEPTION_ISERIES(n, label) \
.
globl
label
##
_iSeries
; \
label
##
_iSeries
:
\
HMT_MEDIUM
; \
mtspr
SPRN_SPRG1
,
r13
; /* save r13 */ \
RUNLATCH_ON
(
r13
)
; \
EXCEPTION_PROLOG_ISERIES_1
(
PACA_EXGEN
)
; \
lbz
r10
,
PACAPROCENABLED
(
r13
)
; \
cmpwi
0
,
r10
,
0
; \
beq
-
label
##
_iSeries_masked
; \
EXCEPTION_PROLOG_ISERIES_2
; \
b
label
##
_common
; \
#ifdef DO_SOFT_DISABLE
#define DISABLE_INTS \
lbz
r10
,
PACAPROCENABLED
(
r13
)
; \
li
r11
,
0
; \
std
r10
,
SOFTE
(
r1
)
; \
mfmsr
r10
; \
stb
r11
,
PACAPROCENABLED
(
r13
)
; \
ori
r10
,
r10
,
MSR_EE
; \
mtmsrd
r10
,
1
#define ENABLE_INTS \
lbz
r10
,
PACAPROCENABLED
(
r13
)
; \
mfmsr
r11
; \
std
r10
,
SOFTE
(
r1
)
; \
ori
r11
,
r11
,
MSR_EE
; \
mtmsrd
r11
,
1
#else /* hard enable/disable interrupts */
#define DISABLE_INTS
#define ENABLE_INTS \
ld
r12
,
_MSR
(
r1
)
; \
mfmsr
r11
; \
rlwimi
r11
,
r12
,
0
,
MSR_EE
; \
mtmsrd
r11
,
1
#endif
#define STD_EXCEPTION_COMMON(trap, label, hdlr) \
.
align
7
; \
.
globl
label
##
_common
; \
label
##
_common
:
\
EXCEPTION_PROLOG_COMMON
(
trap
,
PACA_EXGEN
)
; \
DISABLE_INTS
; \
bl
.
save_nvgprs
; \
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
; \
bl
hdlr
; \
b
.
ret_from_except
#define STD_EXCEPTION_COMMON_LITE(trap, label, hdlr) \
.
align
7
; \
.
globl
label
##
_common
; \
label
##
_common
:
\
EXCEPTION_PROLOG_COMMON
(
trap
,
PACA_EXGEN
)
; \
DISABLE_INTS
; \
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
; \
bl
hdlr
; \
b
.
ret_from_except_lite
/*
*
Start
of
pSeries
system
interrupt
routines
*/
.
=
0x100
.
globl
__start_interrupts
__start_interrupts
:
STD_EXCEPTION_PSERIES
(0
x100
,
system_reset
)
.
=
0x200
_machine_check_pSeries
:
HMT_MEDIUM
mtspr
SPRN_SPRG1
,
r13
/*
save
r13
*/
RUNLATCH_ON
(
r13
)
EXCEPTION_PROLOG_PSERIES
(
PACA_EXMC
,
machine_check_common
)
.
=
0x300
.
globl
data_access_pSeries
data_access_pSeries
:
HMT_MEDIUM
mtspr
SPRN_SPRG1
,
r13
BEGIN_FTR_SECTION
mtspr
SPRN_SPRG2
,
r12
mfspr
r13
,
SPRN_DAR
mfspr
r12
,
SPRN_DSISR
srdi
r13
,
r13
,
60
rlwimi
r13
,
r12
,
16
,
0x20
mfcr
r12
cmpwi
r13
,
0x2c
beq
.
do_stab_bolted_pSeries
mtcrf
0x80
,
r12
mfspr
r12
,
SPRN_SPRG2
END_FTR_SECTION_IFCLR
(
CPU_FTR_SLB
)
EXCEPTION_PROLOG_PSERIES
(
PACA_EXGEN
,
data_access_common
)
.
=
0x380
.
globl
data_access_slb_pSeries
data_access_slb_pSeries
:
HMT_MEDIUM
mtspr
SPRN_SPRG1
,
r13
RUNLATCH_ON
(
r13
)
mfspr
r13
,
SPRN_SPRG3
/*
get
paca
address
into
r13
*/
std
r3
,
PACA_EXSLB
+
EX_R3
(
r13
)
mfspr
r3
,
SPRN_DAR
std
r9
,
PACA_EXSLB
+
EX_R9
(
r13
)
/*
save
r9
-
r12
*/
mfcr
r9
#ifdef __DISABLED__
/
*
Keep
that
around
for
when
we
re
-
implement
dynamic
VSIDs
*/
cmpdi
r3
,
0
bge
slb_miss_user_pseries
#endif /* __DISABLED__ */
std
r10
,
PACA_EXSLB
+
EX_R10
(
r13
)
std
r11
,
PACA_EXSLB
+
EX_R11
(
r13
)
std
r12
,
PACA_EXSLB
+
EX_R12
(
r13
)
mfspr
r10
,
SPRN_SPRG1
std
r10
,
PACA_EXSLB
+
EX_R13
(
r13
)
mfspr
r12
,
SPRN_SRR1
/*
and
SRR1
*/
b
.
slb_miss_realmode
/*
Rel
.
branch
works
in
real
mode
*/
STD_EXCEPTION_PSERIES
(0
x400
,
instruction_access
)
.
=
0x480
.
globl
instruction_access_slb_pSeries
instruction_access_slb_pSeries
:
HMT_MEDIUM
mtspr
SPRN_SPRG1
,
r13
RUNLATCH_ON
(
r13
)
mfspr
r13
,
SPRN_SPRG3
/*
get
paca
address
into
r13
*/
std
r3
,
PACA_EXSLB
+
EX_R3
(
r13
)
mfspr
r3
,
SPRN_SRR0
/*
SRR0
is
faulting
address
*/
std
r9
,
PACA_EXSLB
+
EX_R9
(
r13
)
/*
save
r9
-
r12
*/
mfcr
r9
#ifdef __DISABLED__
/
*
Keep
that
around
for
when
we
re
-
implement
dynamic
VSIDs
*/
cmpdi
r3
,
0
bge
slb_miss_user_pseries
#endif /* __DISABLED__ */
std
r10
,
PACA_EXSLB
+
EX_R10
(
r13
)
std
r11
,
PACA_EXSLB
+
EX_R11
(
r13
)
std
r12
,
PACA_EXSLB
+
EX_R12
(
r13
)
mfspr
r10
,
SPRN_SPRG1
std
r10
,
PACA_EXSLB
+
EX_R13
(
r13
)
mfspr
r12
,
SPRN_SRR1
/*
and
SRR1
*/
b
.
slb_miss_realmode
/*
Rel
.
branch
works
in
real
mode
*/
STD_EXCEPTION_PSERIES
(0
x500
,
hardware_interrupt
)
STD_EXCEPTION_PSERIES
(0
x600
,
alignment
)
STD_EXCEPTION_PSERIES
(0
x700
,
program_check
)
STD_EXCEPTION_PSERIES
(0
x800
,
fp_unavailable
)
STD_EXCEPTION_PSERIES
(0
x900
,
decrementer
)
STD_EXCEPTION_PSERIES
(0
xa00
,
trap_0a
)
STD_EXCEPTION_PSERIES
(0
xb00
,
trap_0b
)
.
=
0xc00
.
globl
system_call_pSeries
system_call_pSeries
:
HMT_MEDIUM
RUNLATCH_ON
(
r9
)
mr
r9
,
r13
mfmsr
r10
mfspr
r13
,
SPRN_SPRG3
mfspr
r11
,
SPRN_SRR0
clrrdi
r12
,
r13
,
32
oris
r12
,
r12
,
system_call_common
@
h
ori
r12
,
r12
,
system_call_common
@
l
mtspr
SPRN_SRR0
,
r12
ori
r10
,
r10
,
MSR_IR|MSR_DR|MSR_RI
mfspr
r12
,
SPRN_SRR1
mtspr
SPRN_SRR1
,
r10
rfid
b
.
/*
prevent
speculative
execution
*/
STD_EXCEPTION_PSERIES
(0
xd00
,
single_step
)
STD_EXCEPTION_PSERIES
(0
xe00
,
trap_0e
)
	/* We need to deal with the Altivec unavailable exception
	 * here which is at 0xf20, thus in the middle of the
	 * prolog code of the PerformanceMonitor one. A little
	 * trickery is thus necessary
	 */
.
=
0xf00
b
performance_monitor_pSeries
STD_EXCEPTION_PSERIES
(0
xf20
,
altivec_unavailable
)
STD_EXCEPTION_PSERIES
(0
x1300
,
instruction_breakpoint
)
STD_EXCEPTION_PSERIES
(0
x1700
,
altivec_assist
)
.
=
0x3000
/***
pSeries
interrupt
support
***/
/
*
moved
from
0xf00
*/
STD_EXCEPTION_PSERIES
(.,
performance_monitor
)
.
align
7
_GLOBAL
(
do_stab_bolted_pSeries
)
mtcrf
0x80
,
r12
mfspr
r12
,
SPRN_SPRG2
EXCEPTION_PROLOG_PSERIES
(
PACA_EXSLB
,
.
do_stab_bolted
)
/*
*
We
have
some
room
here
we
use
that
to
put
*
the
peries
slb
miss
user
trampoline
code
so
it
's reasonably
*
away
from
slb_miss_user_common
to
avoid
problems
with
rfid
*
*
This
is
used
for
when
the
SLB
miss
handler
has
to
go
virtual
,
*
which
doesn
't happen for now anymore but will once we re-implement
*
dynamic
VSIDs
for
shared
page
tables
*/
#ifdef __DISABLED__
slb_miss_user_pseries
:
std
r10
,
PACA_EXGEN
+
EX_R10
(
r13
)
std
r11
,
PACA_EXGEN
+
EX_R11
(
r13
)
std
r12
,
PACA_EXGEN
+
EX_R12
(
r13
)
mfspr
r10
,
SPRG1
ld
r11
,
PACA_EXSLB
+
EX_R9
(
r13
)
ld
r12
,
PACA_EXSLB
+
EX_R3
(
r13
)
std
r10
,
PACA_EXGEN
+
EX_R13
(
r13
)
std
r11
,
PACA_EXGEN
+
EX_R9
(
r13
)
std
r12
,
PACA_EXGEN
+
EX_R3
(
r13
)
clrrdi
r12
,
r13
,
32
mfmsr
r10
mfspr
r11
,
SRR0
/*
save
SRR0
*/
ori
r12
,
r12
,
slb_miss_user_common
@
l
/*
virt
addr
of
handler
*/
ori
r10
,
r10
,
MSR_IR|MSR_DR|MSR_RI
mtspr
SRR0
,
r12
mfspr
r12
,
SRR1
/*
and
SRR1
*/
mtspr
SRR1
,
r10
rfid
b
.
/*
prevent
spec
.
execution
*/
#endif /* __DISABLED__ */
/*
*
Vectors
for
the
FWNMI
option
.
Share
common
code
.
*/
.
globl
system_reset_fwnmi
system_reset_fwnmi
:
HMT_MEDIUM
mtspr
SPRN_SPRG1
,
r13
/*
save
r13
*/
RUNLATCH_ON
(
r13
)
EXCEPTION_PROLOG_PSERIES
(
PACA_EXGEN
,
system_reset_common
)
.
globl
machine_check_fwnmi
machine_check_fwnmi
:
HMT_MEDIUM
mtspr
SPRN_SPRG1
,
r13
/*
save
r13
*/
RUNLATCH_ON
(
r13
)
EXCEPTION_PROLOG_PSERIES
(
PACA_EXMC
,
machine_check_common
)
#ifdef CONFIG_PPC_ISERIES
/***
ISeries
-
LPAR
interrupt
handlers
***/
STD_EXCEPTION_ISERIES
(0
x200
,
machine_check
,
PACA_EXMC
)
.
globl
data_access_iSeries
data_access_iSeries
:
mtspr
SPRN_SPRG1
,
r13
BEGIN_FTR_SECTION
mtspr
SPRN_SPRG2
,
r12
mfspr
r13
,
SPRN_DAR
mfspr
r12
,
SPRN_DSISR
srdi
r13
,
r13
,
60
rlwimi
r13
,
r12
,
16
,
0x20
mfcr
r12
cmpwi
r13
,
0x2c
beq
.
do_stab_bolted_iSeries
mtcrf
0x80
,
r12
mfspr
r12
,
SPRN_SPRG2
END_FTR_SECTION_IFCLR
(
CPU_FTR_SLB
)
EXCEPTION_PROLOG_ISERIES_1
(
PACA_EXGEN
)
EXCEPTION_PROLOG_ISERIES_2
b
data_access_common
.
do_stab_bolted_iSeries
:
mtcrf
0x80
,
r12
mfspr
r12
,
SPRN_SPRG2
EXCEPTION_PROLOG_ISERIES_1
(
PACA_EXSLB
)
EXCEPTION_PROLOG_ISERIES_2
b
.
do_stab_bolted
.
globl
data_access_slb_iSeries
data_access_slb_iSeries
:
mtspr
SPRN_SPRG1
,
r13
/*
save
r13
*/
mfspr
r13
,
SPRN_SPRG3
/*
get
paca
address
into
r13
*/
std
r3
,
PACA_EXSLB
+
EX_R3
(
r13
)
mfspr
r3
,
SPRN_DAR
std
r9
,
PACA_EXSLB
+
EX_R9
(
r13
)
mfcr
r9
#ifdef __DISABLED__
cmpdi
r3
,
0
bge
slb_miss_user_iseries
#endif
std
r10
,
PACA_EXSLB
+
EX_R10
(
r13
)
std
r11
,
PACA_EXSLB
+
EX_R11
(
r13
)
std
r12
,
PACA_EXSLB
+
EX_R12
(
r13
)
mfspr
r10
,
SPRN_SPRG1
std
r10
,
PACA_EXSLB
+
EX_R13
(
r13
)
ld
r12
,
PACALPPACA
+
LPPACASRR1
(
r13
)
;
b
.
slb_miss_realmode
STD_EXCEPTION_ISERIES
(0
x400
,
instruction_access
,
PACA_EXGEN
)
.
globl
instruction_access_slb_iSeries
instruction_access_slb_iSeries
:
mtspr
SPRN_SPRG1
,
r13
/*
save
r13
*/
mfspr
r13
,
SPRN_SPRG3
/*
get
paca
address
into
r13
*/
std
r3
,
PACA_EXSLB
+
EX_R3
(
r13
)
ld
r3
,
PACALPPACA
+
LPPACASRR0
(
r13
)
/*
get
SRR0
value
*/
std
r9
,
PACA_EXSLB
+
EX_R9
(
r13
)
mfcr
r9
#ifdef __DISABLED__
cmpdi
r3
,
0
bge
.
slb_miss_user_iseries
#endif
std
r10
,
PACA_EXSLB
+
EX_R10
(
r13
)
std
r11
,
PACA_EXSLB
+
EX_R11
(
r13
)
std
r12
,
PACA_EXSLB
+
EX_R12
(
r13
)
mfspr
r10
,
SPRN_SPRG1
std
r10
,
PACA_EXSLB
+
EX_R13
(
r13
)
ld
r12
,
PACALPPACA
+
LPPACASRR1
(
r13
)
;
b
.
slb_miss_realmode
#ifdef __DISABLED__
slb_miss_user_iseries
:
std
r10
,
PACA_EXGEN
+
EX_R10
(
r13
)
std
r11
,
PACA_EXGEN
+
EX_R11
(
r13
)
std
r12
,
PACA_EXGEN
+
EX_R12
(
r13
)
mfspr
r10
,
SPRG1
ld
r11
,
PACA_EXSLB
+
EX_R9
(
r13
)
ld
r12
,
PACA_EXSLB
+
EX_R3
(
r13
)
std
r10
,
PACA_EXGEN
+
EX_R13
(
r13
)
std
r11
,
PACA_EXGEN
+
EX_R9
(
r13
)
std
r12
,
PACA_EXGEN
+
EX_R3
(
r13
)
EXCEPTION_PROLOG_ISERIES_2
b
slb_miss_user_common
#endif
MASKABLE_EXCEPTION_ISERIES
(0
x500
,
hardware_interrupt
)
STD_EXCEPTION_ISERIES
(0
x600
,
alignment
,
PACA_EXGEN
)
STD_EXCEPTION_ISERIES
(0
x700
,
program_check
,
PACA_EXGEN
)
STD_EXCEPTION_ISERIES
(0
x800
,
fp_unavailable
,
PACA_EXGEN
)
MASKABLE_EXCEPTION_ISERIES
(0
x900
,
decrementer
)
STD_EXCEPTION_ISERIES
(0
xa00
,
trap_0a
,
PACA_EXGEN
)
STD_EXCEPTION_ISERIES
(0
xb00
,
trap_0b
,
PACA_EXGEN
)
.
globl
system_call_iSeries
system_call_iSeries
:
mr
r9
,
r13
mfspr
r13
,
SPRN_SPRG3
EXCEPTION_PROLOG_ISERIES_2
b
system_call_common
STD_EXCEPTION_ISERIES
(
0xd00
,
single_step
,
PACA_EXGEN
)
STD_EXCEPTION_ISERIES
(
0xe00
,
trap_0e
,
PACA_EXGEN
)
STD_EXCEPTION_ISERIES
(
0xf00
,
performance_monitor
,
PACA_EXGEN
)
.
globl
system_reset_iSeries
system_reset_iSeries
:
mfspr
r13
,
SPRN_SPRG3
/*
Get
paca
address
*/
mfmsr
r24
ori
r24
,
r24
,
MSR_RI
mtmsrd
r24
/*
RI
on
*/
lhz
r24
,
PACAPACAINDEX
(
r13
)
/*
Get
processor
#
*/
cmpwi
0
,
r24
,
0
/*
Are
we
processor
0
?
*/
beq
.
__start_initialization_iSeries
/*
Start
up
the
first
processor
*/
mfspr
r4
,
SPRN_CTRLF
li
r5
,
CTRL_RUNLATCH
/*
Turn
off
the
run
light
*/
andc
r4
,
r4
,
r5
mtspr
SPRN_CTRLT
,
r4
1
:
HMT_LOW
#ifdef CONFIG_SMP
lbz
r23
,
PACAPROCSTART
(
r13
)
/*
Test
if
this
processor
*
should
start
*/
sync
LOADADDR
(
r3
,
current_set
)
sldi
r28
,
r24
,
3
/*
get
current_set
[
cpu
#]
*/
ldx
r3
,
r3
,
r28
addi
r1
,
r3
,
THREAD_SIZE
subi
r1
,
r1
,
STACK_FRAME_OVERHEAD
cmpwi
0
,
r23
,
0
beq
iSeries_secondary_smp_loop
/*
Loop
until
told
to
go
*/
bne
.
__secondary_start
/*
Loop
until
told
to
go
*/
iSeries_secondary_smp_loop
:
/
*
Let
the
Hypervisor
know
we
are
alive
*/
/
*
8002
is
a
call
to
HvCallCfg
::
getLps
,
a
harmless
Hypervisor
function
*/
lis
r3
,
0x8002
rldicr
r3
,
r3
,
32
,
15
/*
r0
=
(
r3
<<
32
)
&
0xffff000000000000
*/
#else /* CONFIG_SMP */
/
*
Yield
the
processor
.
This
is
required
for
non
-
SMP
kernels
which
are
running
on
multi
-
threaded
machines
.
*/
lis
r3
,
0x8000
rldicr
r3
,
r3
,
32
,
15
/*
r3
=
(
r3
<<
32
)
&
0xffff000000000000
*/
addi
r3
,
r3
,
18
/*
r3
=
0x8000000000000012
which
is
"yield"
*/
li
r4
,
0
/*
"yield timed"
*/
li
r5
,-
1
/*
"yield forever"
*/
#endif /* CONFIG_SMP */
li
r0
,-
1
/*
r0
=-
1
indicates
a
Hypervisor
call
*/
sc
/*
Invoke
the
hypervisor
via
a
system
call
*/
mfspr
r13
,
SPRN_SPRG3
/*
Put
r13
back
????
*/
b
1
b
/*
If
SMP
not
configured
,
secondaries
*
loop
forever
*/
.
globl
decrementer_iSeries_masked
decrementer_iSeries_masked
:
li
r11
,
1
stb
r11
,
PACALPPACA
+
LPPACADECRINT
(
r13
)
lwz
r12
,
PACADEFAULTDECR
(
r13
)
mtspr
SPRN_DEC
,
r12
/
*
fall
through
*/
.
globl
hardware_interrupt_iSeries_masked
hardware_interrupt_iSeries_masked
:
mtcrf
0x80
,
r9
/*
Restore
regs
*/
ld
r11
,
PACALPPACA
+
LPPACASRR0
(
r13
)
ld
r12
,
PACALPPACA
+
LPPACASRR1
(
r13
)
mtspr
SPRN_SRR0
,
r11
mtspr
SPRN_SRR1
,
r12
ld
r9
,
PACA_EXGEN
+
EX_R9
(
r13
)
ld
r10
,
PACA_EXGEN
+
EX_R10
(
r13
)
ld
r11
,
PACA_EXGEN
+
EX_R11
(
r13
)
ld
r12
,
PACA_EXGEN
+
EX_R12
(
r13
)
ld
r13
,
PACA_EXGEN
+
EX_R13
(
r13
)
rfid
b
.
/*
prevent
speculative
execution
*/
#endif /* CONFIG_PPC_ISERIES */
/***
Common
interrupt
handlers
***/
STD_EXCEPTION_COMMON
(0
x100
,
system_reset
,
.
system_reset_exception
)
/
*
*
Machine
check
is
different
because
we
use
a
different
*
save
area
:
PACA_EXMC
instead
of
PACA_EXGEN
.
*/
.
align
7
.
globl
machine_check_common
machine_check_common
:
EXCEPTION_PROLOG_COMMON
(0
x200
,
PACA_EXMC
)
DISABLE_INTS
bl
.
save_nvgprs
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
machine_check_exception
b
.
ret_from_except
STD_EXCEPTION_COMMON_LITE
(0
x900
,
decrementer
,
.
timer_interrupt
)
STD_EXCEPTION_COMMON
(0
xa00
,
trap_0a
,
.
unknown_exception
)
STD_EXCEPTION_COMMON
(0
xb00
,
trap_0b
,
.
unknown_exception
)
STD_EXCEPTION_COMMON
(0
xd00
,
single_step
,
.
single_step_exception
)
STD_EXCEPTION_COMMON
(0
xe00
,
trap_0e
,
.
unknown_exception
)
STD_EXCEPTION_COMMON
(0
xf00
,
performance_monitor
,
.
performance_monitor_exception
)
STD_EXCEPTION_COMMON
(0
x1300
,
instruction_breakpoint
,
.
instruction_breakpoint_exception
)
#ifdef CONFIG_ALTIVEC
STD_EXCEPTION_COMMON
(0
x1700
,
altivec_assist
,
.
altivec_assist_exception
)
#else
STD_EXCEPTION_COMMON
(0
x1700
,
altivec_assist
,
.
unknown_exception
)
#endif
/*
 * Here we have detected that the kernel stack pointer is bad.
 * R9 contains the saved CR, r13 points to the paca,
 * r10 contains the (bad) kernel stack pointer,
 * r11 and r12 contain the saved SRR0 and SRR1.
 * We switch to using an emergency stack, save the registers there,
 * and call kernel_bad_stack(), which panics.
 */
bad_stack
:
ld
r1
,
PACAEMERGSP
(
r13
)
subi
r1
,
r1
,
64
+
INT_FRAME_SIZE
std
r9
,
_CCR
(
r1
)
std
r10
,
GPR1
(
r1
)
std
r11
,
_NIP
(
r1
)
std
r12
,
_MSR
(
r1
)
mfspr
r11
,
SPRN_DAR
mfspr
r12
,
SPRN_DSISR
std
r11
,
_DAR
(
r1
)
std
r12
,
_DSISR
(
r1
)
mflr
r10
mfctr
r11
mfxer
r12
std
r10
,
_LINK
(
r1
)
std
r11
,
_CTR
(
r1
)
std
r12
,
_XER
(
r1
)
SAVE_GPR
(0,
r1
)
SAVE_GPR
(2,
r1
)
SAVE_4GPRS
(3,
r1
)
SAVE_2GPRS
(7,
r1
)
SAVE_10GPRS
(12,
r1
)
SAVE_10GPRS
(22,
r1
)
addi
r11
,
r1
,
INT_FRAME_SIZE
std
r11
,
0
(
r1
)
li
r12
,
0
std
r12
,
0
(
r11
)
ld
r2
,
PACATOC
(
r13
)
1
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
kernel_bad_stack
b
1
b
/*
 * Return from an exception with minimal checks.
 * The caller is assumed to have done EXCEPTION_PROLOG_COMMON.
 * If interrupts have been enabled, or anything has been
 * done that might have changed the scheduling status of
 * any task or sent any task a signal, you should use
 * ret_from_except or ret_from_except_lite instead of this.
 */
.
globl
fast_exception_return
fast_exception_return
:
ld
r12
,
_MSR
(
r1
)
ld
r11
,
_NIP
(
r1
)
andi
.
r3
,
r12
,
MSR_RI
/*
check
if
RI
is
set
*/
beq
-
unrecov_fer
ld
r3
,
_CCR
(
r1
)
ld
r4
,
_LINK
(
r1
)
ld
r5
,
_CTR
(
r1
)
ld
r6
,
_XER
(
r1
)
mtcr
r3
mtlr
r4
mtctr
r5
mtxer
r6
REST_GPR
(0,
r1
)
REST_8GPRS
(2,
r1
)
mfmsr
r10
clrrdi
r10
,
r10
,
2
/*
clear
RI
(
LE
is
0
already
)
*/
mtmsrd
r10
,
1
mtspr
SPRN_SRR1
,
r12
mtspr
SPRN_SRR0
,
r11
REST_4GPRS
(10,
r1
)
ld
r1
,
GPR1
(
r1
)
rfid
b
.
/*
prevent
speculative
execution
*/
unrecov_fer
:
bl
.
save_nvgprs
1
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
unrecoverable_exception
b
1
b
/*
*
Here
r13
points
to
the
paca
,
r9
contains
the
saved
CR
,
*
SRR0
and
SRR1
are
saved
in
r11
and
r12
,
*
r9
-
r13
are
saved
in
paca
->
exgen
.
*/
.
align
7
.
globl
data_access_common
data_access_common
:
RUNLATCH_ON
(
r10
)
/*
It
wont
fit
in
the
0x300
handler
*/
mfspr
r10
,
SPRN_DAR
std
r10
,
PACA_EXGEN
+
EX_DAR
(
r13
)
mfspr
r10
,
SPRN_DSISR
stw
r10
,
PACA_EXGEN
+
EX_DSISR
(
r13
)
EXCEPTION_PROLOG_COMMON
(0
x300
,
PACA_EXGEN
)
ld
r3
,
PACA_EXGEN
+
EX_DAR
(
r13
)
lwz
r4
,
PACA_EXGEN
+
EX_DSISR
(
r13
)
li
r5
,
0x300
b
.
do_hash_page
/*
Try
to
handle
as
hpte
fault
*/
.
align
7
.
globl
instruction_access_common
instruction_access_common
:
EXCEPTION_PROLOG_COMMON
(0
x400
,
PACA_EXGEN
)
ld
r3
,
_NIP
(
r1
)
andis
.
r4
,
r12
,
0x5820
li
r5
,
0x400
b
.
do_hash_page
/*
Try
to
handle
as
hpte
fault
*/
/*
*
Here
is
the
common
SLB
miss
user
that
is
used
when
going
to
virtual
*
mode
for
SLB
misses
,
that
is
currently
not
used
*/
#ifdef __DISABLED__
.
align
7
.
globl
slb_miss_user_common
slb_miss_user_common
:
mflr
r10
std
r3
,
PACA_EXGEN
+
EX_DAR
(
r13
)
stw
r9
,
PACA_EXGEN
+
EX_CCR
(
r13
)
std
r10
,
PACA_EXGEN
+
EX_LR
(
r13
)
std
r11
,
PACA_EXGEN
+
EX_SRR0
(
r13
)
bl
.
slb_allocate_user
ld
r10
,
PACA_EXGEN
+
EX_LR
(
r13
)
ld
r3
,
PACA_EXGEN
+
EX_R3
(
r13
)
lwz
r9
,
PACA_EXGEN
+
EX_CCR
(
r13
)
ld
r11
,
PACA_EXGEN
+
EX_SRR0
(
r13
)
mtlr
r10
beq
-
slb_miss_fault
andi
.
r10
,
r12
,
MSR_RI
/*
check
for
unrecoverable
exception
*/
beq
-
unrecov_user_slb
mfmsr
r10
.
machine
push
.
machine
"
power4
"
mtcrf
0x80
,
r9
.
machine
pop
clrrdi
r10
,
r10
,
2
/*
clear
RI
before
setting
SRR0
/
1
*/
mtmsrd
r10
,
1
mtspr
SRR0
,
r11
mtspr
SRR1
,
r12
ld
r9
,
PACA_EXGEN
+
EX_R9
(
r13
)
ld
r10
,
PACA_EXGEN
+
EX_R10
(
r13
)
ld
r11
,
PACA_EXGEN
+
EX_R11
(
r13
)
ld
r12
,
PACA_EXGEN
+
EX_R12
(
r13
)
ld
r13
,
PACA_EXGEN
+
EX_R13
(
r13
)
rfid
b
.
slb_miss_fault
:
EXCEPTION_PROLOG_COMMON
(0
x380
,
PACA_EXGEN
)
ld
r4
,
PACA_EXGEN
+
EX_DAR
(
r13
)
li
r5
,
0
std
r4
,
_DAR
(
r1
)
std
r5
,
_DSISR
(
r1
)
b
.
handle_page_fault
unrecov_user_slb
:
EXCEPTION_PROLOG_COMMON
(0
x4200
,
PACA_EXGEN
)
DISABLE_INTS
bl
.
save_nvgprs
1
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
unrecoverable_exception
b
1
b
#endif /* __DISABLED__ */
/*
 * r13 points to the PACA, r9 contains the saved CR,
 * r12 contains the saved SRR1, SRR0 is still ready for return
 * r3 has the faulting address
 * r9 - r13 are saved in paca->exslb.
 * r3 is saved in paca->slb_r3
 * We assume we aren't going to take any exceptions during this procedure.
 */
_GLOBAL
(
slb_miss_realmode
)
mflr
r10
stw
r9
,
PACA_EXSLB
+
EX_CCR
(
r13
)
/*
save
CR
in
exc
.
frame
*/
std
r10
,
PACA_EXSLB
+
EX_LR
(
r13
)
/*
save
LR
*/
bl
.
slb_allocate_realmode
/
*
All
done
--
return
from
exception
.
*/
ld
r10
,
PACA_EXSLB
+
EX_LR
(
r13
)
ld
r3
,
PACA_EXSLB
+
EX_R3
(
r13
)
lwz
r9
,
PACA_EXSLB
+
EX_CCR
(
r13
)
/*
get
saved
CR
*/
#ifdef CONFIG_PPC_ISERIES
ld
r11
,
PACALPPACA
+
LPPACASRR0
(
r13
)
/*
get
SRR0
value
*/
#endif /* CONFIG_PPC_ISERIES */
mtlr
r10
andi
.
r10
,
r12
,
MSR_RI
/*
check
for
unrecoverable
exception
*/
beq
-
unrecov_slb
.
machine
push
.
machine
"
power4
"
mtcrf
0x80
,
r9
mtcrf
0x01
,
r9
/*
slb_allocate
uses
cr0
and
cr7
*/
.
machine
pop
#ifdef CONFIG_PPC_ISERIES
mtspr
SPRN_SRR0
,
r11
mtspr
SPRN_SRR1
,
r12
#endif /* CONFIG_PPC_ISERIES */
ld
r9
,
PACA_EXSLB
+
EX_R9
(
r13
)
ld
r10
,
PACA_EXSLB
+
EX_R10
(
r13
)
ld
r11
,
PACA_EXSLB
+
EX_R11
(
r13
)
ld
r12
,
PACA_EXSLB
+
EX_R12
(
r13
)
ld
r13
,
PACA_EXSLB
+
EX_R13
(
r13
)
rfid
b
.
/*
prevent
speculative
execution
*/
unrecov_slb
:
EXCEPTION_PROLOG_COMMON
(0
x4100
,
PACA_EXSLB
)
DISABLE_INTS
bl
.
save_nvgprs
1
:
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
unrecoverable_exception
b
1
b
.
align
7
.
globl
hardware_interrupt_common
.
globl
hardware_interrupt_entry
hardware_interrupt_common
:
EXCEPTION_PROLOG_COMMON
(0
x500
,
PACA_EXGEN
)
hardware_interrupt_entry
:
DISABLE_INTS
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
do_IRQ
b
.
ret_from_except_lite
.
align
7
.
globl
alignment_common
alignment_common
:
mfspr
r10
,
SPRN_DAR
std
r10
,
PACA_EXGEN
+
EX_DAR
(
r13
)
mfspr
r10
,
SPRN_DSISR
stw
r10
,
PACA_EXGEN
+
EX_DSISR
(
r13
)
EXCEPTION_PROLOG_COMMON
(0
x600
,
PACA_EXGEN
)
ld
r3
,
PACA_EXGEN
+
EX_DAR
(
r13
)
lwz
r4
,
PACA_EXGEN
+
EX_DSISR
(
r13
)
std
r3
,
_DAR
(
r1
)
std
r4
,
_DSISR
(
r1
)
bl
.
save_nvgprs
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
ENABLE_INTS
bl
.
alignment_exception
b
.
ret_from_except
.
align
7
.
globl
program_check_common
program_check_common
:
EXCEPTION_PROLOG_COMMON
(0
x700
,
PACA_EXGEN
)
bl
.
save_nvgprs
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
ENABLE_INTS
bl
.
program_check_exception
b
.
ret_from_except
.
align
7
.
globl
fp_unavailable_common
fp_unavailable_common
:
EXCEPTION_PROLOG_COMMON
(0
x800
,
PACA_EXGEN
)
bne
.
load_up_fpu
/*
if
from
user
,
just
load
it
up
*/
bl
.
save_nvgprs
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
ENABLE_INTS
bl
.
kernel_fp_unavailable_exception
BUG_OPCODE
.
align
7
.
globl
altivec_unavailable_common
altivec_unavailable_common
:
EXCEPTION_PROLOG_COMMON
(0
xf20
,
PACA_EXGEN
)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
bne
.
load_up_altivec
/*
if
from
user
,
just
load
it
up
*/
END_FTR_SECTION_IFSET
(
CPU_FTR_ALTIVEC
)
#endif
bl
.
save_nvgprs
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
ENABLE_INTS
bl
.
altivec_unavailable_exception
b
.
ret_from_except
#ifdef CONFIG_ALTIVEC
/*
 * load_up_altivec(unused, unused, tsk)
 * Disable VMX for the task which had it previously,
 * and save its vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 * On SMP we know the VMX is free, since we give it up every
 * switch (ie, no lazy save of the vector registers).
 * On entry: r13 == 'current' && last_task_used_altivec != 'current'
 */
_STATIC
(
load_up_altivec
)
mfmsr
r5
/*
grab
the
current
MSR
*/
oris
r5
,
r5
,
MSR_VEC
@
h
mtmsrd
r5
/*
enable
use
of
VMX
now
*/
isync
/*
*
For
SMP
,
we
don
't do lazy VMX switching because it just gets too
*
horrendously
complex
,
especially
when
a
task
switches
from
one
CPU
*
to
another
.
Instead
we
call
giveup_altvec
in
switch_to
.
*
VRSAVE
isn
't dealt with here, that is done in the normal context
*
switch
code
.
Note
that
we
could
rely
on
vrsave
value
to
eventually
*
avoid
saving
all
of
the
VREGs
here
...
*/
#ifndef CONFIG_SMP
ld
r3
,
last_task_used_altivec
@
got
(
r2
)
ld
r4
,
0
(
r3
)
cmpdi
0
,
r4
,
0
beq
1
f
/
*
Save
VMX
state
to
last_task_used_altivec
's THREAD struct */
addi
r4
,
r4
,
THREAD
SAVE_32VRS
(0,
r5
,
r4
)
mfvscr
vr0
li
r10
,
THREAD_VSCR
stvx
vr0
,
r10
,
r4
/
*
Disable
VMX
for
last_task_used_altivec
*/
ld
r5
,
PT_REGS
(
r4
)
ld
r4
,
_MSR
-
STACK_FRAME_OVERHEAD
(
r5
)
lis
r6
,
MSR_VEC
@
h
andc
r4
,
r4
,
r6
std
r4
,
_MSR
-
STACK_FRAME_OVERHEAD
(
r5
)
1
:
#endif /* CONFIG_SMP */
/
*
Hack
:
if
we
get
an
altivec
unavailable
trap
with
VRSAVE
*
set
to
all
zeros
,
we
assume
this
is
a
broken
application
*
that
fails
to
set
it
properly
,
and
thus
we
switch
it
to
*
all
1
's
*/
mfspr
r4
,
SPRN_VRSAVE
cmpdi
0
,
r4
,
0
bne
+
1
f
li
r4
,-
1
mtspr
SPRN_VRSAVE
,
r4
1
:
/
*
enable
use
of
VMX
after
return
*/
ld
r4
,
PACACURRENT
(
r13
)
addi
r5
,
r4
,
THREAD
/*
Get
THREAD
*/
oris
r12
,
r12
,
MSR_VEC
@
h
std
r12
,
_MSR
(
r1
)
li
r4
,
1
li
r10
,
THREAD_VSCR
stw
r4
,
THREAD_USED_VR
(
r5
)
lvx
vr0
,
r10
,
r5
mtvscr
vr0
REST_32VRS
(0,
r4
,
r5
)
#ifndef CONFIG_SMP
/
*
Update
last_task_used_math
to
'current'
*/
subi
r4
,
r5
,
THREAD
/*
Back
to
'current'
*/
std
r4
,
0
(
r3
)
#endif /* CONFIG_SMP */
/
*
restore
registers
and
return
*/
b
fast_exception_return
#endif /* CONFIG_ALTIVEC */
/*
*
Hash
table
stuff
*/
.
align
7
_GLOBAL
(
do_hash_page
)
std
r3
,
_DAR
(
r1
)
std
r4
,
_DSISR
(
r1
)
andis
.
r0
,
r4
,
0xa450
/*
weird
error
?
*/
bne
-
.
handle_page_fault
/*
if
not
,
try
to
insert
a
HPTE
*/
BEGIN_FTR_SECTION
andis
.
r0
,
r4
,
0x0020
/*
Is
it
a
segment
table
fault
?
*/
bne
-
.
do_ste_alloc
/*
If
so
handle
it
*/
END_FTR_SECTION_IFCLR
(
CPU_FTR_SLB
)
	/*
	 * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
	 * accessing a userspace segment (even from the kernel). We assume
	 * kernel addresses always have the high bit set.
	 */
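In C terms, the access-flag word that the next few instructions assemble for
hash_page() looks roughly like this (the flag and mask values below are written
out literally as a sketch; the real definitions live in the ppc64 pgtable and
register headers):

#include <stdio.h>

/* Illustrative values only -- the kernel takes these from its headers. */
#define PAGE_PRESENT	0x0001UL
#define PAGE_USER	0x0002UL
#define PAGE_EXEC	0x0004UL
#define PAGE_RW		0x0200UL
#define MSR_PR		0x4000UL
#define DSISR_STORE	0x02000000UL

/* Sketch of the permission word passed to hash_page() in r4. */
static unsigned long hash_access_flags(unsigned long dsisr, unsigned long msr,
					unsigned long ea, unsigned long trap)
{
	unsigned long access = PAGE_PRESENT;

	if (dsisr & DSISR_STORE)		/* store fault -> needs write */
		access |= PAGE_RW;

	/* _PAGE_USER if MSR_PR is set or the address is a user segment;
	 * kernel addresses are assumed to have the high bit set. */
	if ((msr & MSR_PR) || !(ea >> 63))
		access |= PAGE_USER;

	if (trap == 0x400)			/* instruction access fault */
		access |= PAGE_EXEC;

	return access;
}

int main(void)
{
	/* e.g. a user-mode store fault at a low (user) address */
	printf("access = 0x%lx\n",
	       hash_access_flags(DSISR_STORE, MSR_PR, 0x10000000UL, 0x300));
	return 0;
}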
rlwinm
r4
,
r4
,
32
-
25
+
9
,
31
-
9
,
31
-
9
/*
DSISR_STORE
->
_PAGE_RW
*/
rotldi
r0
,
r3
,
15
/*
Move
high
bit
into
MSR_PR
posn
*/
orc
r0
,
r12
,
r0
/*
MSR_PR
|
~
high_bit
*/
rlwimi
r4
,
r0
,
32
-
13
,
30
,
30
/*
becomes
_PAGE_USER
access
bit
*/
ori
r4
,
r4
,
1
/*
add
_PAGE_PRESENT
*/
rlwimi
r4
,
r5
,
22
+
2
,
31
-
2
,
31
-
2
/*
Set
_PAGE_EXEC
if
trap
is
0x400
*/
/
*
*
On
iSeries
,
we
soft
-
disable
interrupts
here
,
then
*
hard
-
enable
interrupts
so
that
the
hash_page
code
can
spin
on
*
the
hash_table_lock
without
problems
on
a
shared
processor
.
*/
DISABLE_INTS
/
*
*
r3
contains
the
faulting
address
*
r4
contains
the
required
access
permissions
*
r5
contains
the
trap
number
*
*
at
return
r3
=
0
for
success
*/
bl
.
hash_page
/*
build
HPTE
if
possible
*/
cmpdi
r3
,
0
/*
see
if
hash_page
succeeded
*/
#ifdef DO_SOFT_DISABLE
/
*
*
If
we
had
interrupts
soft
-
enabled
at
the
point
where
the
*
DSI
/
ISI
occurred
,
and
an
interrupt
came
in
during
hash_page
,
*
handle
it
now
.
*
We
jump
to
ret_from_except_lite
rather
than
fast_exception_return
*
because
ret_from_except_lite
will
check
for
and
handle
pending
*
interrupts
if
necessary
.
*/
beq
.
ret_from_except_lite
/
*
For
a
hash
failure
,
we
don
't bother re-enabling interrupts */
ble
-
12
f
/
*
*
hash_page
couldn
't handle it, set soft interrupt enable back
*
to
what
it
was
before
the
trap
.
Note
that
.
local_irq_restore
*
handles
any
interrupts
pending
at
this
point
.
*/
ld
r3
,
SOFTE
(
r1
)
bl
.
local_irq_restore
b
11
f
#else
beq
fast_exception_return
/*
Return
from
exception
on
success
*/
ble
-
12
f
/*
Failure
return
from
hash_page
*/
/
*
fall
through
*/
#endif
/*
Here
we
have
a
page
fault
that
hash_page
can
't handle. */
_GLOBAL
(
handle_page_fault
)
ENABLE_INTS
11
:
ld
r4
,
_DAR
(
r1
)
ld
r5
,
_DSISR
(
r1
)
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
bl
.
do_page_fault
cmpdi
r3
,
0
beq
+
.
ret_from_except_lite
bl
.
save_nvgprs
mr
r5
,
r3
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
lwz
r4
,
_DAR
(
r1
)
bl
.
bad_page_fault
b
.
ret_from_except
/*
We
have
a
page
fault
that
hash_page
could
handle
but
HV
refused
*
the
PTE
insertion
*/
12
:
bl
.
save_nvgprs
addi
r3
,
r1
,
STACK_FRAME_OVERHEAD
lwz
r4
,
_DAR
(
r1
)
bl
.
low_hash_fault
b
.
ret_from_except
/
*
here
we
have
a
segment
miss
*/
_GLOBAL
(
do_ste_alloc
)
bl
.
ste_allocate
/*
try
to
insert
stab
entry
*/
cmpdi
r3
,
0
beq
+
fast_exception_return
b
.
handle_page_fault
/*
*
r13
points
to
the
PACA
,
r9
contains
the
saved
CR
,
*
r11
and
r12
contain
the
saved
SRR0
and
SRR1
.
*
r9
-
r13
are
saved
in
paca
->
exslb
.
*
We
assume
we
aren
't going to take any exceptions during this procedure.
*
We
assume
(
DAR
>>
60
)
==
0xc
.
*/
.
align
7
_GLOBAL
(
do_stab_bolted
)
stw
r9
,
PACA_EXSLB
+
EX_CCR
(
r13
)
/*
save
CR
in
exc
.
frame
*/
std
r11
,
PACA_EXSLB
+
EX_SRR0
(
r13
)
/*
save
SRR0
in
exc
.
frame
*/
/
*
Hash
to
the
primary
group
*/
ld
r10
,
PACASTABVIRT
(
r13
)
mfspr
r11
,
SPRN_DAR
srdi
r11
,
r11
,
28
rldimi
r10
,
r11
,
7
,
52
/*
r10
=
first
ste
of
the
group
*/
/
*
Calculate
VSID
*/
/
*
This
is
a
kernel
address
,
so
protovsid
=
ESID
*/
ASM_VSID_SCRAMBLE
(
r11
,
r9
)
rldic
r9
,
r11
,
12
,
16
/*
r9
=
vsid
<<
12
*/
/
*
Search
the
primary
group
for
a
free
entry
*/
1
:
ld
r11
,
0
(
r10
)
/*
Test
valid
bit
of
the
current
ste
*/
andi
.
r11
,
r11
,
0x80
beq
2
f
addi
r10
,
r10
,
16
andi
.
r11
,
r10
,
0x70
bne
1
b
/
*
Stick
for
only
searching
the
primary
group
for
now
.
*/
/
*
At
least
for
now
,
we
use
a
very
simple
random
castout
scheme
*/
/
*
Use
the
TB
as
a
random
number
; OR in 1 to avoid entry 0 */
mftb
r11
rldic
r11
,
r11
,
4
,
57
/*
r11
=
(
r11
<<
4
)
&
0x70
*/
ori
r11
,
r11
,
0x10
/
*
r10
currently
points
to
an
ste
one
past
the
group
of
interest
*/
/
*
make
it
point
to
the
randomly
selected
entry
*/
subi
r10
,
r10
,
128
or
r10
,
r10
,
r11
/*
r10
is
the
entry
to
invalidate
*/
isync
/*
mark
the
entry
invalid
*/
ld
r11
,
0
(
r10
)
rldicl
r11
,
r11
,
56
,
1
/*
clear
the
valid
bit
*/
rotldi
r11
,
r11
,
8
std
r11
,
0
(
r10
)
sync
clrrdi
r11
,
r11
,
28
/*
Get
the
esid
part
of
the
ste
*/
slbie
r11
2
:
std
r9
,
8
(
r10
)
/*
Store
the
vsid
part
of
the
ste
*/
eieio
mfspr
r11
,
SPRN_DAR
/*
Get
the
new
esid
*/
clrrdi
r11
,
r11
,
28
/*
Permits
a
full
32
b
of
ESID
*/
ori
r11
,
r11
,
0x90
/*
Turn
on
valid
and
kp
*/
std
r11
,
0
(
r10
)
/*
Put
new
entry
back
into
the
stab
*/
sync
/
*
All
done
--
return
from
exception
.
*/
lwz
r9
,
PACA_EXSLB
+
EX_CCR
(
r13
)
/*
get
saved
CR
*/
ld
r11
,
PACA_EXSLB
+
EX_SRR0
(
r13
)
/*
get
saved
SRR0
*/
andi
.
r10
,
r12
,
MSR_RI
beq
-
unrecov_slb
mtcrf
0x80
,
r9
/*
restore
CR
*/
mfmsr
r10
clrrdi
r10
,
r10
,
2
mtmsrd
r10
,
1
mtspr
SPRN_SRR0
,
r11
mtspr
SPRN_SRR1
,
r12
ld
r9
,
PACA_EXSLB
+
EX_R9
(
r13
)
ld
r10
,
PACA_EXSLB
+
EX_R10
(
r13
)
ld
r11
,
PACA_EXSLB
+
EX_R11
(
r13
)
ld
r12
,
PACA_EXSLB
+
EX_R12
(
r13
)
ld
r13
,
PACA_EXSLB
+
EX_R13
(
r13
)
rfid
b
.
/*
prevent
speculative
execution
*/
/*
 * Space for CPU0's segment table.
 *
 * On iSeries, the hypervisor must fill in at least one entry before
 * we get control (with relocate on).  The address is given to the hv
 * as a page number (see xLparMap in lpardata.c), so this must be at a
 * fixed address (the linker can't compute (u64)&initial_stab >>
 * PAGE_SHIFT).
 */
.
=
STAB0_PHYS_ADDR
/*
0x6000
*/
.
globl
initial_stab
initial_stab
:
.
space
4096
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
	. = 0x7000
	.globl fwnmi_data_area
fwnmi_data_area:

	/* iSeries does not use the FWNMI stuff, so it is safe to put
	 * this here, even if we later allow kernels that will boot on
	 * both pSeries and iSeries */
#ifdef CONFIG_PPC_ISERIES
	. = LPARMAP_PHYS
#include "lparmap.s"
/*
 * This ".text" is here for old compilers that generate a trailing
 * .note section when compiling .c files to .s
 */
	.text
#endif /* CONFIG_PPC_ISERIES */

	. = 0x8000
/*
 * On pSeries, secondary processors spin in the following code.
 * At entry, r3 = this processor's number (physical cpu id)
 */
_GLOBAL
(
pSeries_secondary_smp_init
)
mr
r24
,
r3
/
*
turn
on
64
-
bit
mode
*/
bl
.
enable_64b_mode
isync
/
*
Copy
some
CPU
settings
from
CPU
0
*/
bl
.
__restore_cpu_setup
/
*
Set
up
a
paca
value
for
this
processor
.
Since
we
have
the
*
physical
cpu
id
in
r24
,
we
need
to
search
the
pacas
to
find
*
which
logical
id
maps
to
our
physical
one
.
*/
LOADADDR
(
r13
,
paca
)
/*
Get
base
vaddr
of
paca
array
*/
li
r5
,
0
/*
logical
cpu
id
*/
1
:
lhz
r6
,
PACAHWCPUID
(
r13
)
/*
Load
HW
procid
from
paca
*/
cmpw
r6
,
r24
/*
Compare
to
our
id
*/
beq
2
f
addi
r13
,
r13
,
PACA_SIZE
/*
Loop
to
next
PACA
on
miss
*/
addi
r5
,
r5
,
1
cmpwi
r5
,
NR_CPUS
blt
1
b
mr
r3
,
r24
/*
not
found
,
copy
phys
to
r3
*/
b
.
kexec_wait
/*
next
kernel
might
do
better
*/
2
:
mtspr
SPRN_SPRG3
,
r13
/*
Save
vaddr
of
paca
in
SPRG3
*/
/
*
From
now
on
,
r24
is
expected
to
be
logical
cpuid
*/
mr
r24
,
r5
3
:
HMT_LOW
lbz
r23
,
PACAPROCSTART
(
r13
)
/*
Test
if
this
processor
should
*/
/
*
start
.
*/
sync
/
*
Create
a
temp
kernel
stack
for
use
before
relocation
is
on
.
*/
ld
r1
,
PACAEMERGSP
(
r13
)
subi
r1
,
r1
,
STACK_FRAME_OVERHEAD
cmpwi
0
,
r23
,
0
#ifdef CONFIG_SMP
bne
.
__secondary_start
#endif
b
3
b
/*
Loop
until
told
to
go
*/
#ifdef CONFIG_PPC_ISERIES
_STATIC
(
__start_initialization_iSeries
)
/
*
Clear
out
the
BSS
*/
LOADADDR
(
r11
,
__bss_stop
)
LOADADDR
(
r8
,
__bss_start
)
sub
r11
,
r11
,
r8
/*
bss
size
*/
addi
r11
,
r11
,
7
/*
round
up
to
an
even
double
word
*/
rldicl
.
r11
,
r11
,
61
,
3
/*
shift
right
by
3
*/
beq
4
f
addi
r8
,
r8
,-
8
li
r0
,
0
mtctr
r11
/*
zero
this
many
doublewords
*/
3
:
stdu
r0
,
8
(
r8
)
bdnz
3
b
4
:
LOADADDR
(
r1
,
init_thread_union
)
addi
r1
,
r1
,
THREAD_SIZE
li
r0
,
0
stdu
r0
,-
STACK_FRAME_OVERHEAD
(
r1
)
LOADADDR
(
r3
,
cpu_specs
)
LOADADDR
(
r4
,
cur_cpu_spec
)
li
r5
,
0
bl
.
identify_cpu
LOADADDR
(
r2
,
__toc_start
)
addi
r2
,
r2
,
0x4000
addi
r2
,
r2
,
0x4000
bl
.
iSeries_early_setup
bl
.
early_setup
/
*
relocation
is
on
at
this
point
*/
b
.
start_here_common
#endif /* CONFIG_PPC_ISERIES */
#ifdef CONFIG_PPC_MULTIPLATFORM
_STATIC
(
__mmu_off
)
mfmsr
r3
andi
.
r0
,
r3
,
MSR_IR
|
MSR_DR
beqlr
andc
r3
,
r3
,
r0
mtspr
SPRN_SRR0
,
r4
mtspr
SPRN_SRR1
,
r3
sync
rfid
b
.
/*
prevent
speculative
execution
*/
/*
 * Here is our main kernel entry point. We currently support 2 kinds of entries
 * depending on the value of r5.
 *
 *   r5 != NULL -> OF entry, we go to prom_init, "legacy" parameter content
 *                 in r3...r7
 *
 *   r5 == NULL -> kexec style entry. r3 is a physical pointer to the
 *                 DT block, r4 is a physical pointer to the kernel itself
 *
 */
_GLOBAL
(
__start_initialization_multiplatform
)
/
*
*
Are
we
booted
from
a
PROM
Of
-
type
client
-
interface
?
*/
cmpldi
cr0
,
r5
,
0
bne
.
__boot_from_prom
/*
yes
->
prom
*/
/
*
Save
parameters
*/
mr
r31
,
r3
mr
r30
,
r4
/
*
Make
sure
we
are
running
in
64
bits
mode
*/
bl
.
enable_64b_mode
/
*
Setup
some
critical
970
SPRs
before
switching
MMU
off
*/
bl
.
__970_cpu_preinit
/
*
cpu
#
*/
li
r24
,
0
/
*
Switch
off
MMU
if
not
already
*/
LOADADDR
(
r4
,
.
__after_prom_start
-
KERNELBASE
)
add
r4
,
r4
,
r30
bl
.
__mmu_off
b
.
__after_prom_start
_STATIC
(
__boot_from_prom
)
/
*
Save
parameters
*/
mr
r31
,
r3
mr
r30
,
r4
mr
r29
,
r5
mr
r28
,
r6
mr
r27
,
r7
/
*
Make
sure
we
are
running
in
64
bits
mode
*/
bl
.
enable_64b_mode
/
*
put
a
relocation
offset
into
r3
*/
bl
.
reloc_offset
LOADADDR
(
r2
,
__toc_start
)
addi
r2
,
r2
,
0x4000
addi
r2
,
r2
,
0x4000
/
*
Relocate
the
TOC
from
a
virt
addr
to
a
real
addr
*/
sub
r2
,
r2
,
r3
/
*
Restore
parameters
*/
mr
r3
,
r31
mr
r4
,
r30
mr
r5
,
r29
mr
r6
,
r28
mr
r7
,
r27
/
*
Do
all
of
the
interaction
with
OF
client
interface
*/
bl
.
prom_init
/
*
We
never
return
*/
trap
/*
*
At
this
point
,
r3
contains
the
physical
address
we
are
running
at
,
*
returned
by
prom_init
()
*/
_STATIC
(
__after_prom_start
)
/*
 * We need to run with __start at physical address 0.
 * This will leave some code in the first 256B of
 * real memory, which are reserved for software use.
 * The remainder of the first page is loaded with the fixed
 * interrupt vectors.  The next two pages are filled with
 * unknown exception placeholders.
 *
 * Note: This process overwrites the OF exception vectors.
 *	r26 == relocation offset
 *	r27 == KERNELBASE
 */
bl
.
reloc_offset
mr
r26
,
r3
SET_REG_TO_CONST
(
r27
,
KERNELBASE
)
li
r3
,
0
/*
target
addr
*/
//
XXX
FIXME
:
Use
phys
returned
by
OF
(
r30
)
sub
r4
,
r27
,
r26
/*
source
addr
*/
/
*
current
address
of
_start
*/
/
*
i
.
e
.
where
we
are
running
*/
/
*
the
source
addr
*/
LOADADDR
(
r5
,
copy_to_here
)
/*
#
bytes
of
memory
to
copy
*/
sub
r5
,
r5
,
r27
li
r6
,
0x100
/*
Start
offset
,
the
first
0x100
*/
/
*
bytes
were
copied
earlier
.
*/
bl
.
copy_and_flush
/*
copy
the
first
n
bytes
*/
/
*
this
includes
the
code
being
*/
/
*
executed
here
.
*/
LOADADDR
(
r0
,
4
f
)
/*
Jump
to
the
copy
of
this
code
*/
mtctr
r0
/*
that
we
just
made
/
relocated
*/
bctr
4
:
LOADADDR
(
r5
,
klimit
)
sub
r5
,
r5
,
r26
ld
r5
,
0
(
r5
)
/*
get
the
value
of
klimit
*/
sub
r5
,
r5
,
r27
bl
.
copy_and_flush
/*
copy
the
rest
*/
b
.
start_here_multiplatform
#endif /* CONFIG_PPC_MULTIPLATFORM */
/*
 * Copy routine used to copy the kernel to start at physical address 0
 * and flush and invalidate the caches as needed.
 * r3 = dest addr, r4 = source addr, r5 = copy limit, r6 = start offset
 * on exit, r3, r4, r5 are unchanged, r6 is updated to be >= r5.
 *
 * Note: this routine *only* clobbers r0, r6 and lr
 */
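Functionally, copy_and_flush is a chunked memcpy that also pushes each destination
line out to memory and invalidates the corresponding instruction-cache line, so the
relocated kernel image is safe to execute.  A behavioural C sketch (the 128-byte
step matches the 16 doublewords copied per pass below; the cache maintenance is
approximated with a compiler builtin, not the dcbst/icbi pair the hardware uses):

#include <string.h>

#define COPY_STEP	128	/* 16 doublewords per pass in the assembly */

/*
 * Behavioural sketch of copy_and_flush: copy [offset, limit) from src to
 * dst and make the copied bytes visible to instruction fetch.  Returns the
 * updated offset (>= limit), mirroring what the assembly leaves in r6.
 */
static unsigned long copy_and_flush(char *dst, const char *src,
				    unsigned long limit, unsigned long offset)
{
	while (offset < limit) {
		memcpy(dst + offset, src + offset, COPY_STEP);
		/* The real code does dcbst (push to memory), sync, icbi
		 * (invalidate the icache line) and a final sync. */
		__builtin___clear_cache(dst + offset, dst + offset + COPY_STEP);
		offset += COPY_STEP;
	}
	return offset;
}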
_GLOBAL
(
copy_and_flush
)
addi
r5
,
r5
,-
8
addi
r6
,
r6
,-
8
4
:
li
r0
,
16
/*
Use
the
least
common
*/
/
*
denominator
cache
line
*/
/
*
size
.
This
results
in
*/
/
*
extra
cache
line
flushes
*/
/
*
but
operation
is
correct
.
*/
/
*
Can
't get cache line size */
/
*
from
NACA
as
it
is
being
*/
/
*
moved
too
.
*/
mtctr
r0
/*
put
#
words
/
line
in
ctr
*/
3
:
addi
r6
,
r6
,
8
/*
copy
a
cache
line
*/
ldx
r0
,
r6
,
r4
stdx
r0
,
r6
,
r3
bdnz
3
b
dcbst
r6
,
r3
/*
write
it
to
memory
*/
sync
icbi
r6
,
r3
/*
flush
the
icache
line
*/
cmpld
0
,
r6
,
r5
blt
4
b
sync
addi
r5
,
r5
,
8
addi
r6
,
r6
,
8
blr
.
align
8
copy_to_here
:
#ifdef CONFIG_SMP
#ifdef CONFIG_PPC_PMAC
/*
 * On PowerMac, secondary processors start from the reset vector, which
 * is temporarily turned into a call to one of the functions below.
 */
.
section
".text"
;
.
align
2
;
.
globl
__secondary_start_pmac_0
__secondary_start_pmac_0
:
/
*
NB
the
entries
for
cpus
0
,
1
,
2
must
each
occupy
8
bytes
.
*/
li
r24
,
0
b
1
f
li
r24
,
1
b
1
f
li
r24
,
2
b
1
f
li
r24
,
3
1
:
_GLOBAL
(
pmac_secondary_start
)
/
*
turn
on
64
-
bit
mode
*/
bl
.
enable_64b_mode
isync
/
*
Copy
some
CPU
settings
from
CPU
0
*/
bl
.
__restore_cpu_setup
/
*
pSeries
do
that
early
though
I
don
't think we really need it */
mfmsr
r3
ori
r3
,
r3
,
MSR_RI
mtmsrd
r3
/*
RI
on
*/
/
*
Set
up
a
paca
value
for
this
processor
.
*/
LOADADDR
(
r4
,
paca
)
/*
Get
base
vaddr
of
paca
array
*/
mulli
r13
,
r24
,
PACA_SIZE
/*
Calculate
vaddr
of
right
paca
*/
add
r13
,
r13
,
r4
/*
for
this
processor
.
*/
mtspr
SPRN_SPRG3
,
r13
/*
Save
vaddr
of
paca
in
SPRG3
*/
/
*
Create
a
temp
kernel
stack
for
use
before
relocation
is
on
.
*/
ld
r1
,
PACAEMERGSP
(
r13
)
subi
r1
,
r1
,
STACK_FRAME_OVERHEAD
b
.
__secondary_start
#endif /* CONFIG_PPC_PMAC */
/*
 * This function is called after the master CPU has released the
 * secondary processors.  The execution environment is relocation off.
 * The paca for this processor has the following fields initialized at
 * this point:
 *   1. Processor number
 *   2. Segment table pointer (virtual address)
 * On entry the following are set:
 *   r1	   = stack pointer.  vaddr for iSeries, raddr (temp stack) for pSeries
 *   r24   = cpu# (in Linux terms)
 *   r13   = paca virtual address
 *   SPRG3 = paca virtual address
 */
_GLOBAL
(
__secondary_start
)
HMT_MEDIUM
/*
Set
thread
priority
to
MEDIUM
*/
ld
r2
,
PACATOC
(
r13
)
/
*
Do
early
setup
for
that
CPU
*/
bl
.
early_setup_secondary
/
*
Initialize
the
kernel
stack
.
Just
a
repeat
for
iSeries
.
*/
LOADADDR
(
r3
,
current_set
)
sldi
r28
,
r24
,
3
/*
get
current_set
[
cpu
#]
*/
ldx
r1
,
r3
,
r28
addi
r1
,
r1
,
THREAD_SIZE
-
STACK_FRAME_OVERHEAD
std
r1
,
PACAKSAVE
(
r13
)
li
r7
,
0
mtlr
r7
/
*
enable
MMU
and
jump
to
start_secondary
*/
LOADADDR
(
r3
,.
start_secondary_prolog
)
SET_REG_TO_CONST
(
r4
,
MSR_KERNEL
)
#ifdef DO_SOFT_DISABLE
ori
r4
,
r4
,
MSR_EE
#endif
mtspr
SPRN_SRR0
,
r3
mtspr
SPRN_SRR1
,
r4
rfid
b
.
/*
prevent
speculative
execution
*/
/*
 * Running with relocation on at this point.  All we want to do is
 * zero the stack back-chain pointer before going into C code.
 */
_GLOBAL
(
start_secondary_prolog
)
li
r3
,
0
std
r3
,
0
(
r1
)
/*
Zero
the
stack
frame
pointer
*/
bl
.
start_secondary
#endif
/*
*
This
subroutine
clobbers
r11
and
r12
*/
_GLOBAL
(
enable_64b_mode
)
mfmsr
r11
/*
grab
the
current
MSR
*/
li
r12
,
1
rldicr
r12
,
r12
,
MSR_SF_LG
,(
63
-
MSR_SF_LG
)
or
r11
,
r11
,
r12
li
r12
,
1
rldicr
r12
,
r12
,
MSR_ISF_LG
,(
63
-
MSR_ISF_LG
)
or
r11
,
r11
,
r12
mtmsrd
r11
isync
blr
#ifdef CONFIG_PPC_MULTIPLATFORM
/*
 * This is where the main kernel code starts.
 */
_STATIC(start_here_multiplatform)
	/* get a new offset, now that the kernel has moved. */
	bl	.reloc_offset
	mr	r26,r3

	/* Clear out the BSS. It may have been done in prom_init,
	 * already but that's irrelevant since prom_init will soon
	 * be detached from the kernel completely. Besides, we need
	 * to clear it now for kexec-style entry.
	 */
	LOADADDR(r11,__bss_stop)
	LOADADDR(r8,__bss_start)
	sub	r11,r11,r8		/* bss size			*/
	addi	r11,r11,7		/* round up to an even double word */
	rldicl. r11,r11,61,3		/* shift right by 3		*/
	beq	4f
	addi	r8,r8,-8
	li	r0,0
	mtctr	r11			/* zero this many doublewords	*/
3:	stdu	r0,8(r8)
	bdnz	3b
4:
	mfmsr	r6
	ori	r6,r6,MSR_RI
	mtmsrd	r6			/* RI on */

#ifdef CONFIG_HMT
	/* Start up the second thread on cpu 0 */
	mfspr	r3,SPRN_PVR
	srwi	r3,r3,16
	cmpwi	r3,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r3,0x36			/* Icestar */
	beq	90f
	cmpwi	r3,0x37			/* SStar   */
	beq	90f
	b	91f			/* HMT not supported */
90:	li	r3,0
	bl	.hmt_start_secondary
91:
#endif

	/* The following gets the stack and TOC set up with the regs */
	/* pointing to the real addr of the kernel stack.  This is   */
	/* all done to support the C function call below which sets  */
	/* up the htab.  This is done because we have relocated the  */
	/* kernel but are still running in real mode. */

	LOADADDR(r3,init_thread_union)
	sub	r3,r3,r26

	/* set up a stack pointer (physical address) */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* set up the TOC (physical address) */
	LOADADDR(r2,__toc_start)
	addi	r2,r2,0x4000
	addi	r2,r2,0x4000
	sub	r2,r2,r26

	LOADADDR(r3,cpu_specs)
	sub	r3,r3,r26
	LOADADDR(r4,cur_cpu_spec)
	sub	r4,r4,r26
	mr	r5,r26
	bl	.identify_cpu

	/* Save some low level config HIDs of CPU0 to be copied to
	 * other CPUs later on, or used for suspend/resume
	 */
	bl	.__save_cpu_setup
	sync

	/* Setup a valid physical PACA pointer in SPRG3 for early_setup
	 * note that boot_cpuid can always be 0 nowadays since there is
	 * nowhere it can be initialized differently before we reach this
	 * code
	 */
	LOADADDR(r27, boot_cpuid)
	sub	r27,r27,r26
	lwz	r27,0(r27)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r27,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	sub	r13,r13,r26		/* convert to physical addr	 */
	mtspr	SPRN_SPRG3,r13		/* PPPBBB: Temp... -Peter */

	/* Do very early kernel initializations, including initial hash table,
	 * stab and slb setup before we turn on relocation.
	 */

	/* Restore parameters passed from prom_init/kexec */
	mr	r3,r31
	bl	.early_setup

	LOADADDR(r3,.start_here_common)
	SET_REG_TO_CONST(r4, MSR_KERNEL)
	mtspr	SPRN_SRR0,r3
	mtspr	SPRN_SRR1,r4
	rfid
	b	.	/* prevent speculative execution */
#endif /* CONFIG_PPC_MULTIPLATFORM */
/* This is where all platforms converge execution */
_STATIC(start_here_common)
	/* relocation is on at this point */

	/* The following code sets up the SP and TOC now that we are */
	/* running with translation enabled. */

	LOADADDR(r3,init_thread_union)

	/* set up the stack */
	addi	r1,r3,THREAD_SIZE
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)

	/* Apply the CPUs-specific fixups (nop out sections not relevant
	 * to this CPU
	 */
	li	r3,0
	bl	.do_cpu_ftr_fixups

	LOADADDR(r26, boot_cpuid)
	lwz	r26,0(r26)

	LOADADDR(r24, paca)		/* Get base vaddr of paca array	 */
	mulli	r13,r26,PACA_SIZE	/* Calculate vaddr of right paca */
	add	r13,r13,r24		/* for this processor.		 */
	mtspr	SPRN_SPRG3,r13

	/* ptr to current */
	LOADADDR(r4, init_task)
	std	r4,PACACURRENT(r13)

	/* Load the TOC */
	ld	r2,PACATOC(r13)
	std	r1,PACAKSAVE(r13)

	bl	.setup_system

	/* Load up the kernel context */
5:
#ifdef DO_SOFT_DISABLE
	li	r5,0
	stb	r5,PACAPROCENABLED(r13)	/* Soft Disabled */
	mfmsr	r5
	ori	r5,r5,MSR_EE		/* Hard Enabled */
	mtmsrd	r5
#endif

	bl	.start_kernel
_GLOBAL(hmt_init)
#ifdef CONFIG_HMT
	LOADADDR(r5, hmt_thread_data)
	mfspr	r7,SPRN_PVR
	srwi	r7,r7,16
	cmpwi	r7,0x34			/* Pulsar  */
	beq	90f
	cmpwi	r7,0x36			/* Icestar */
	beq	91f
	cmpwi	r7,0x37			/* SStar   */
	beq	91f
	b	101f
90:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x1f
	b	92f
91:	mfspr	r6,SPRN_PIR
	andi.	r6,r6,0x3ff
92:	sldi	r4,r24,3
	stwx	r6,r5,r4
	bl	.hmt_start_secondary
	b	101f

__hmt_secondary_hold:
	LOADADDR(r5, hmt_thread_data)
	clrldi	r5,r5,4
	li	r7,0
	mfspr	r6,SPRN_PIR
	mfspr	r8,SPRN_PVR
	srwi	r8,r8,16
	cmpwi	r8,0x34
	bne	93f
	andi.	r6,r6,0x1f
	b	103f
93:	andi.	r6,r6,0x3f
103:	lwzx	r8,r5,r7
	cmpw	r8,r6
	beq	104f
	addi	r7,r7,8
	b	103b
104:	addi	r7,r7,4
	lwzx	r9,r5,r7
	mr	r24,r9
101:
#endif
	mr	r3,r24
	b	.pSeries_secondary_smp_init

#ifdef CONFIG_HMT
_GLOBAL(hmt_start_secondary)
	LOADADDR(r4,__hmt_secondary_hold)
	clrldi	r4,r4,4
	mtspr	SPRN_NIADORM, r4
	mfspr	r4, SPRN_MSRDORM
	li	r5, -65
	and	r4, r4, r5
	mtspr	SPRN_MSRDORM, r4
	lis	r4,0xffef
	ori	r4,r4,0x7403
	mtspr	SPRN_TSC, r4
	li	r4,0x1f4
	mtspr	SPRN_TST, r4
	mfspr	r4, SPRN_HID0
	ori	r4, r4, 0x1
	mtspr	SPRN_HID0, r4
	mfspr	r4, SPRN_CTRLF
	oris	r4, r4, 0x40
	mtspr	SPRN_CTRLT, r4
	blr
#endif
/*
 * We put a few things here that have to be page-aligned.
 * This stuff goes at the beginning of the bss, which is page-aligned.
 */
	.section ".bss"

	.align	PAGE_SHIFT

	.globl	empty_zero_page
empty_zero_page:
	.space	PAGE_SIZE

	.globl	swapper_pg_dir
swapper_pg_dir:
	.space	PAGE_SIZE

/*
 * This space gets a copy of optional info passed to us by the bootstrap
 * Used to pass parameters into the kernel like root=/dev/sda1, etc.
 */
	.globl	cmd_line
cmd_line:
	.space	COMMAND_LINE_SIZE
arch/ppc64/kernel/misc.S deleted 100644 → 0
/*
 * arch/ppc/kernel/misc.S
 *
 * This file contains miscellaneous low-level functions.
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 * Largely rewritten by Cort Dougan (cort@cs.nmt.edu)
 * and Paul Mackerras.
 * Adapted for iSeries by Mike Corrigan (mikejc@us.ibm.com)
 * PPC64 updates by Dave Engebretsen (engebret@us.ibm.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 */
#include <linux/config.h>
#include <linux/sys.h>
#include <asm/unistd.h>
#include <asm/errno.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/cache.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
	.text

/*
 * Returns (address we were linked at) - (address we are running at)
 * for use before the text and data are mapped to KERNELBASE.
 */

_GLOBAL(reloc_offset)
	mflr	r0
	bl	1f
1:	mflr	r3
	LOADADDR(r4,1b)
	sub	r3,r4,r3
	mtlr	r0
	blr

_GLOBAL(get_msr)
	mfmsr	r3
	blr

_GLOBAL(get_dar)
	mfdar	r3
	blr

_GLOBAL(get_srr0)
	mfsrr0	r3
	blr

_GLOBAL(get_srr1)
	mfsrr1	r3
	blr

_GLOBAL(get_sp)
	mr	r3,r1
	blr

#ifdef CONFIG_IRQSTACKS
_GLOBAL(call_do_softirq)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-112(r3)
	mr	r1,r3
	bl	.__do_softirq
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr

_GLOBAL(call___do_IRQ)
	mflr	r0
	std	r0,16(r1)
	stdu	r1,THREAD_SIZE-112(r5)
	mr	r1,r5
	bl	.__do_IRQ
	ld	r1,0(r1)
	ld	r0,16(r1)
	mtlr	r0
	blr
#endif /* CONFIG_IRQSTACKS */
/*
 * To be called by C code which needs to do some operations with MMU
 * disabled. Note that interrupts have to be disabled by the caller
 * prior to calling us. The code called _MUST_ be in the RMO of course
 * and part of the linear mapping as we don't attempt to translate the
 * stack pointer at all. The function is called with the stack switched
 * to this CPU emergency stack
 *
 * prototype is void *call_with_mmu_off(void *func, void *data);
 *
 * the called function is expected to be of the form
 *
 * void *called(void *data);
 */
_GLOBAL(call_with_mmu_off)
	mflr	r0			/* get link, save it on stackframe */
	std	r0,16(r1)
	mr	r1,r5			/* save old stack ptr */
	ld	r1,PACAEMERGSP(r13)	/* get emerg. stack */
	subi	r1,r1,STACK_FRAME_OVERHEAD
	std	r0,16(r1)		/* save link on emerg. stack */
	std	r5,0(r1)		/* save old stack ptr in backchain */
	ld	r3,0(r3)		/* get to real function ptr (assume same TOC) */
	bl	2f			/* we need LR to return, continue at label 2 */

	ld	r0,16(r1)		/* we return here from the call, get LR and */
	ld	r1,0(r1)		/* .. old stack ptr */
	mtspr	SPRN_SRR0,r0		/* and get back to virtual mode with these */
	mfmsr	r4
	ori	r4,r4,MSR_IR|MSR_DR
	mtspr	SPRN_SRR1,r4
	rfid

2:	mtspr	SPRN_SRR0,r3		/* coming from above, enter real mode */
	mr	r3,r4			/* get parameter */
	mfmsr	r0
	ori	r0,r0,MSR_IR|MSR_DR
	xori	r0,r0,MSR_IR|MSR_DR
	mtspr	SPRN_SRR1,r0
	rfid
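
The block comment above documents the calling contract. As a rough, hypothetical sketch (the helper and caller below are illustrations, not part of this commit, and they assume the called code already lives in the RMO/linear mapping), a C user would look something like:

/* Hypothetical illustration only. */
static void *read_byte_real(void *data)
{
	return (void *)(unsigned long)(*(volatile u8 *)data);
}

static u8 example_real_mode_read(void *addr)
{
	unsigned long flags;
	void *ret;

	local_irq_save(flags);		/* interrupts must be off, per the comment above */
	ret = call_with_mmu_off((void *)read_byte_real, addr);
	local_irq_restore(flags);
	return (u8)(unsigned long)ret;
}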
	.section	".toc","aw"
PPC64_CACHES:
	.tc		ppc64_caches[TC],ppc64_caches
	.section	".text"

/*
 * Write any modified data cache blocks out to memory
 * and invalidate the corresponding instruction cache blocks.
 *
 * flush_icache_range(unsigned long start, unsigned long stop)
 *
 *   flush all bytes from start through stop-1 inclusive
 */

_KPROBE(__flush_icache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 * and in some cases i-cache and d-cache line sizes differ from
 * each other.
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)/* Get cache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of cache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
1:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	1b
	sync

/* Now invalidate the instruction cache */

	lwz	r7,ICACHEL1LINESIZE(r10)	/* Get Icache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5
	lwz	r9,ICACHEL1LOGLINESIZE(r10)	/* Get log-2 of Icache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
2:	icbi	0,r6
	add	r6,r6,r7
	bdnz	2b
	isync
	blr
	.text

/*
 * Like above, but only do the D-cache.
 *
 * flush_dcache_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_range)

/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	blr
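
For reference, a caller of flush_dcache_range() simply passes the virtual start address and one past the end of the region it has dirtied; a hedged, hypothetical example:

/* Hypothetical example: push a freshly written buffer out to memory. */
static void example_push_buffer_to_ram(void *buf, size_t len)
{
	unsigned long start = (unsigned long)buf;

	flush_dcache_range(start, start + len);	/* flushes bytes start..stop-1 */
}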
/*
 * Like above, but works on non-mapped physical addresses.
 * Use only for non-LPAR setups! It also assumes real mode
 * is cacheable. Used for flushing out the DART before using
 * it as uncacheable memory
 *
 * flush_dcache_phys_range(unsigned long start, unsigned long stop)
 *
 *    flush all bytes from start to stop-1 inclusive
 */
_GLOBAL(flush_dcache_phys_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)	/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	mfmsr	r5			/* Disable MMU Data Relocation */
	ori	r0,r5,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsr	r0
	sync
	isync
	mtctr	r8
0:	dcbst	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	mtmsr	r5			/* Re-enable MMU Data Relocation */
	sync
	isync
	blr
_GLOBAL(flush_inval_dcache_range)
	ld	r10,PPC64_CACHES@toc(r2)
	lwz	r7,DCACHEL1LINESIZE(r10)	/* Get dcache line size */
	addi	r5,r7,-1
	andc	r6,r3,r5		/* round low to line bdy */
	subf	r8,r6,r4		/* compute length */
	add	r8,r8,r5		/* ensure we get enough */
	lwz	r9,DCACHEL1LOGLINESIZE(r10)/* Get log-2 of dcache line size */
	srw.	r8,r8,r9		/* compute line count */
	beqlr				/* nothing to do? */
	sync
	isync
	mtctr	r8
0:	dcbf	0,r6
	add	r6,r6,r7
	bdnz	0b
	sync
	isync
	blr
/*
 * Flush a particular page from the data cache to RAM.
 * Note: this is necessary because the instruction cache does *not*
 * snoop from the data cache.
 *
 *	void __flush_dcache_icache(void *page)
 */
_GLOBAL(__flush_dcache_icache)
/*
 * Flush the data cache to memory
 *
 * Different systems have different cache line sizes
 */

/* Flush the dcache */
	ld	r7,PPC64_CACHES@toc(r2)
	clrrdi	r3,r3,PAGE_SHIFT		/* Page align */
	lwz	r4,DCACHEL1LINESPERPAGE(r7)	/* Get # dcache lines per page */
	lwz	r5,DCACHEL1LINESIZE(r7)		/* Get dcache line size */
	mr	r6,r3
	mtctr	r4
0:	dcbst	0,r6
	add	r6,r6,r5
	bdnz	0b
	sync

/* Now invalidate the icache */

	lwz	r4,ICACHEL1LINESPERPAGE(r7)	/* Get # icache lines per page */
	lwz	r5,ICACHEL1LINESIZE(r7)		/* Get icache line size */
	mtctr	r4
1:	icbi	0,r3
	add	r3,r3,r5
	bdnz	1b
	isync
	blr
/*
 * I/O string operations
 *
 * insb(port, buf, len)
 * outsb(port, buf, len)
 * insw(port, buf, len)
 * outsw(port, buf, len)
 * insl(port, buf, len)
 * outsl(port, buf, len)
 * insw_ns(port, buf, len)
 * outsw_ns(port, buf, len)
 * insl_ns(port, buf, len)
 * outsl_ns(port, buf, len)
 *
 * The *_ns versions don't do byte-swapping.
 */
_GLOBAL(_insb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbz	r5,0(r3)
	eieio
	stbu	r5,1(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

_GLOBAL(_outsb)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,1
	blelr-
00:	lbzu	r5,1(r4)
	stb	r5,0(r3)
	bdnz	00b
	sync
	blr

_GLOBAL(_insw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhbrx	r5,0,r3
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

_GLOBAL(_outsw)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
	sthbrx	r5,0,r3
	bdnz	00b
	sync
	blr

_GLOBAL(_insl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwbrx	r5,0,r3
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

_GLOBAL(_outsl)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
	stwbrx	r5,0,r3
	bdnz	00b
	sync
	blr

/* _GLOBAL(ide_insw) now in drivers/ide/ide-iops.c */
_GLOBAL(_insw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhz	r5,0(r3)
	eieio
	sthu	r5,2(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

/* _GLOBAL(ide_outsw) now in drivers/ide/ide-iops.c */
_GLOBAL(_outsw_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,2
	blelr-
00:	lhzu	r5,2(r4)
	sth	r5,0(r3)
	bdnz	00b
	sync
	blr

_GLOBAL(_insl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwz	r5,0(r3)
	eieio
	stwu	r5,4(r4)
	bdnz	00b
	twi	0,r5,0
	isync
	blr

_GLOBAL(_outsl_ns)
	cmpwi	0,r5,0
	mtctr	r5
	subi	r4,r4,4
	blelr-
00:	lwzu	r5,4(r4)
	stw	r5,0(r3)
	bdnz	00b
	sync
	blr
/*
 * identify_cpu and calls setup_cpu
 * In:	r3 = base of the cpu_specs array
 *	r4 = address of cur_cpu_spec
 *	r5 = relocation offset
 */
_GLOBAL(identify_cpu)
	mfpvr	r7
1:
	lwz	r8,CPU_SPEC_PVR_MASK(r3)
	and	r8,r8,r7
	lwz	r9,CPU_SPEC_PVR_VALUE(r3)
	cmplw	0,r9,r8
	beq	1f
	addi	r3,r3,CPU_SPEC_ENTRY_SIZE
	b	1b
1:
	add	r0,r3,r5
	std	r0,0(r4)
	ld	r4,CPU_SPEC_SETUP(r3)
	sub	r4,r4,r5
	ld	r4,0(r4)
	sub	r4,r4,r5
	mtctr	r4
	/* Calling convention for cpu setup is r3=offset, r4=cur_cpu_spec */
	mr	r4,r3
	mr	r3,r5
	bctr
/*
 * do_cpu_ftr_fixups - goes through the list of CPU feature fixups
 * and writes nop's over sections of code that don't apply for this cpu.
 * r3 = data offset (not changed)
 */
_GLOBAL(do_cpu_ftr_fixups)
	/* Get CPU 0 features */
	LOADADDR(r6,cur_cpu_spec)
	sub	r6,r6,r3
	ld	r4,0(r6)
	sub	r4,r4,r3
	ld	r4,CPU_SPEC_FEATURES(r4)
	/* Get the fixup table */
	LOADADDR(r6,__start___ftr_fixup)
	sub	r6,r6,r3
	LOADADDR(r7,__stop___ftr_fixup)
	sub	r7,r7,r3
	/* Do the fixup */
1:	cmpld	r6,r7
	bgelr
	addi	r6,r6,32
	ld	r8,-32(r6)	/* mask */
	and	r8,r8,r4
	ld	r9,-24(r6)	/* value */
	cmpld	r8,r9
	beq	1b
	ld	r8,-16(r6)	/* section begin */
	ld	r9,-8(r6)	/* section end */
	subf.	r9,r8,r9
	beq	1b
	/* write nops over the section of code */
	/* todo: if large section, add a branch at the start of it */
	srwi	r9,r9,2
	mtctr	r9
	sub	r8,r8,r3
	lis	r0,0x60000000@h	/* nop */
3:	stw	r0,0(r8)
	andi.	r10,r4,CPU_FTR_SPLIT_ID_CACHE@l
	beq	2f
	dcbst	0,r8		/* suboptimal, but simpler */
	sync
	icbi	0,r8
2:	addi	r8,r8,4
	bdnz	3b
	sync			/* additional sync needed on g4 */
	isync
	b	1b
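
Each fixup record walked above is four doublewords, read at offsets -32/-24/-16/-8 from the advanced pointer; conceptually it corresponds to a C layout along these lines (a sketch only, the struct and field names are mine, not from this commit):

/* Sketch of one __ftr_fixup table entry as the loop above consumes it. */
struct ftr_fixup_entry {
	unsigned long mask;	/* feature bits this entry tests             */
	unsigned long value;	/* keep code if (features & mask) == value   */
	unsigned long start;	/* first instruction of the section          */
	unsigned long end;	/* one past the last instruction             */
};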
#if defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE)
/*
 * Do an IO access in real mode
 */
_GLOBAL(real_readb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	lbz	r3,0(r3)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr

/*
 * Do an IO access in real mode
 */
_GLOBAL(real_writeb)
	mfmsr	r7
	ori	r0,r7,MSR_DR
	xori	r0,r0,MSR_DR
	sync
	mtmsrd	r0
	sync
	isync
	mfspr	r6,SPRN_HID4
	rldicl	r5,r6,32,0
	ori	r5,r5,0x100
	rldicl	r5,r5,32,0
	sync
	mtspr	SPRN_HID4,r5
	isync
	slbia
	isync
	stb	r3,0(r4)
	sync
	mtspr	SPRN_HID4,r6
	isync
	slbia
	isync
	mtmsrd	r7
	sync
	isync
	blr
#endif /* defined(CONFIG_PPC_PMAC) || defined(CONFIG_PPC_MAPLE) */
/*
 * SCOM access functions for 970 (FX only for now)
 *
 * unsigned long scom970_read(unsigned int address);
 * void scom970_write(unsigned int address, unsigned long value);
 *
 * The address passed in is the 24 bits register address. This code
 * is 970 specific and will not check the status bits, so you should
 * know what you are doing.
 */
_GLOBAL(scom970_read)
	/* interrupts off */
	mfmsr	r4
	ori	r0,r4,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
	 * (including parity). On current CPUs they must be 0'd,
	 * and finally or in RW bit
	 */
	rlwinm	r3,r3,8,0,15
	ori	r3,r3,0x8000

	/* do the actual scom read */
	sync
	mtspr	SPRN_SCOMC,r3
	isync
	mfspr	r3,SPRN_SCOMD
	isync
	mfspr	r0,SPRN_SCOMC
	isync

	/* XXX: fixup result on some buggy 970's (ouch ! we lost a bit, bah
	 * that's the best we can do). Not implemented yet as we don't use
	 * the scom on any of the bogus CPUs yet, but may have to be done
	 * ultimately
	 */

	/* restore interrupts */
	mtmsrd	r4,1
	blr


_GLOBAL(scom970_write)
	/* interrupts off */
	mfmsr	r5
	ori	r0,r5,MSR_EE
	xori	r0,r0,MSR_EE
	mtmsrd	r0,1

	/* rotate 24 bits SCOM address 8 bits left and mask out it's low 8 bits
	 * (including parity). On current CPUs they must be 0'd.
	 */
	rlwinm	r3,r3,8,0,15

	sync
	mtspr	SPRN_SCOMD,r4		/* write data */
	isync
	mtspr	SPRN_SCOMC,r3		/* write command */
	isync
	mfspr	3,SPRN_SCOMC
	isync

	/* restore interrupts */
	mtmsrd	r5,1
	blr
/*
 * Create a kernel thread
 *   kernel_thread(fn, arg, flags)
 */
_GLOBAL(kernel_thread)
	std	r29,-24(r1)
	std	r30,-16(r1)
	stdu	r1,-STACK_FRAME_OVERHEAD(r1)
	mr	r29,r3
	mr	r30,r4
	ori	r3,r5,CLONE_VM	/* flags */
	oris	r3,r3,(CLONE_UNTRACED>>16)
	li	r4,0		/* new sp (unused) */
	li	r0,__NR_clone
	sc
	cmpdi	0,r3,0		/* parent or child? */
	bne	1f		/* return if parent */
	li	r0,0
	stdu	r0,-STACK_FRAME_OVERHEAD(r1)
	ld	r2,8(r29)
	ld	r29,0(r29)
	mtlr	r29		/* fn addr in lr */
	mr	r3,r30		/* load arg and call fn */
	blrl
	li	r0,__NR_exit	/* exit after child exits */
	li	r3,0
	sc
1:	addi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r29,-24(r1)
	ld	r30,-16(r1)
	blr
/*
 * disable_kernel_fp()
 * Disable the FPU.
 */
_GLOBAL(disable_kernel_fp)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_FP_LG),1
	rldicl	r3,r0,(MSR_FP_LG+1),0
	mtmsrd	r3			/* disable use of fpu now */
	isync
	blr
#ifdef CONFIG_ALTIVEC

#if 0 /* this has no callers for now */
/*
 * disable_kernel_altivec()
 * Disable the VMX.
 */
_GLOBAL(disable_kernel_altivec)
	mfmsr	r3
	rldicl	r0,r3,(63-MSR_VEC_LG),1
	rldicl	r3,r0,(MSR_VEC_LG+1),0
	mtmsrd	r3			/* disable use of VMX now */
	isync
	blr
#endif /* 0 */

/*
 * giveup_altivec(tsk)
 * Disable VMX for the task given as the argument,
 * and save the vector registers in its thread_struct.
 * Enables the VMX for use in the kernel on return.
 */
_GLOBAL(giveup_altivec)
	mfmsr	r5
	oris	r5,r5,MSR_VEC@h
	mtmsrd	r5			/* enable use of VMX now */
	isync
	cmpdi	0,r3,0
	beqlr-				/* if no previous owner, done */
	addi	r3,r3,THREAD		/* want THREAD of task */
	ld	r5,PT_REGS(r3)
	cmpdi	0,r5,0
	SAVE_32VRS(0,r4,r3)
	mfvscr	vr0
	li	r4,THREAD_VSCR
	stvx	vr0,r4,r3
	beq	1f
	ld	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
	lis	r3,MSR_VEC@h
	andc	r4,r4,r3		/* disable FP for previous task */
	std	r4,_MSR-STACK_FRAME_OVERHEAD(r5)
1:
#ifndef CONFIG_SMP
	li	r5,0
	ld	r4,last_task_used_altivec@got(r2)
	std	r5,0(r4)
#endif /* CONFIG_SMP */
	blr

#endif /* CONFIG_ALTIVEC */
_GLOBAL(__setup_cpu_power3)
	blr

_GLOBAL(execve)
	li	r0,__NR_execve
	sc
	bnslr
	neg	r3,r3
	blr
/* kexec_wait(phys_cpu)
 *
 * wait for the flag to change, indicating this kernel is going away but
 * the slave code for the next one is at addresses 0 to 100.
 *
 * This is used by all slaves.
 *
 * Physical (hardware) cpu id should be in r3.
 */
_GLOBAL(kexec_wait)
	bl	1f
1:	mflr	r5
	addi	r5,r5,kexec_flag-1b

99:	HMT_LOW
#ifdef CONFIG_KEXEC		/* use no memory without kexec */
	lwz	r4,0(r5)
	cmpwi	0,r4,0
	bnea	0x60
#endif
	b	99b

/* this can be in text because we won't change it until we are
 * running in real anyways
 */
kexec_flag:
	.long	0
#ifdef CONFIG_KEXEC
/* kexec_smp_wait(void)
 *
 * call with interrupts off
 * note: this is a terminal routine, it does not save lr
 *
 * get phys id from paca
 * set paca id to -1 to say we got here
 * switch to real mode
 * join other cpus in kexec_wait(phys_id)
 */
_GLOBAL(kexec_smp_wait)
	lhz	r3,PACAHWCPUID(r13)
	li	r4,-1
	sth	r4,PACAHWCPUID(r13)	/* let others know we left */
	bl	real_mode
	b	.kexec_wait

/*
 * switch to real mode (turn mmu off)
 * we use the early kernel trick that the hardware ignores bits
 * 0 and 1 (big endian) of the effective address in real mode
 *
 * don't overwrite r3 here, it is live for kexec_wait above.
 */
real_mode:	/* assume normal blr return */
1:	li	r9,MSR_RI
	li	r10,MSR_DR|MSR_IR
	mflr	r11		/* return address to SRR0 */
	mfmsr	r12
	andc	r9,r12,r9
	andc	r10,r12,r10
	mtmsrd	r9,1
	mtspr	SPRN_SRR1,r10
	mtspr	SPRN_SRR0,r11
	rfid
/*
 * kexec_sequence(newstack, start, image, control, clear_all())
 *
 * does the grungy work with stack switching and real mode switches
 * also does simple calls to other code
 */
_GLOBAL(kexec_sequence)
	mflr	r0
	std	r0,16(r1)

	/* switch stacks to newstack -- &kexec_stack.stack */
	stdu	r1,THREAD_SIZE-112(r3)
	mr	r1,r3

	li	r0,0
	std	r0,16(r1)

	/* save regs for local vars on new stack.
	 * yes, we won't go back, but ...
	 */
	std	r31,-8(r1)
	std	r30,-16(r1)
	std	r29,-24(r1)
	std	r28,-32(r1)
	std	r27,-40(r1)
	std	r26,-48(r1)
	std	r25,-56(r1)

	stdu	r1,-112-64(r1)

	/* save args into preserved regs */
	mr	r31,r3			/* newstack (both) */
	mr	r30,r4			/* start (real) */
	mr	r29,r5			/* image (virt) */
	mr	r28,r6			/* control, unused */
	mr	r27,r7			/* clear_all() fn desc */
	mr	r26,r8			/* spare */
	lhz	r25,PACAHWCPUID(r13)	/* get our phys cpu from paca */

	/* disable interrupts, we are overwriting kernel data next */
	mfmsr	r3
	rlwinm	r3,r3,0,17,15
	mtmsrd	r3,1

	/* copy dest pages, flush whole dest image */
	mr	r3,r29
	bl	.kexec_copy_flush	/* (image) */

	/* turn off mmu */
	bl	real_mode

	/* clear out hardware hash page table and tlb */
	ld	r5,0(r27)		/* deref function descriptor */
	mtctr	r5
	bctrl				/* ppc_md.hash_clear_all(void); */

/*
 * kexec image calling is:
 *    the first 0x100 bytes of the entry point are copied to 0
 *
 *    all slaves branch to slave = 0x60 (absolute)
 *        slave(phys_cpu_id);
 *
 *    master goes to start = entry point
 *        start(phys_cpu_id, start, 0);
 *
 *
 * a wrapper is needed to call existing kernels, here is an approximate
 * description of one method:
 *
 * v2: (2.6.10)
 *   start will be near the boot_block (maybe 0x100 bytes before it?)
 *   it will have a 0x60, which will b to boot_block, where it will wait
 *   and 0 will store phys into struct boot-block and load r3 from there,
 *   copy kernel 0-0x100 and tell slaves to back down to 0x60 again
 *
 * v1: (2.6.9)
 *    boot block will have all cpus scanning device tree to see if they
 *    are the boot cpu ?????
 *    other device tree differences (prop sizes, va vs pa, etc)...
 */

	/* copy 0x100 bytes starting at start to 0 */
	li	r3,0
	mr	r4,r30
	li	r5,0x100
	li	r6,0
	bl	.copy_and_flush	/* (dest, src, copy limit, start offset) */
1:	/* assume normal blr return */

	/* release other cpus to the new kernel secondary start at 0x60 */
	mflr	r5
	li	r6,1
	stw	r6,kexec_flag-1b(5)
	mr	r3,r25	# my phys cpu
	mr	r4,r30	# start, aka phys mem offset
	mtlr	4
	li	r5,0
	blr	/* image->start(physid, image->start, 0); */
#endif /* CONFIG_KEXEC */
arch/ppc64/kernel/ppc_ksyms.c deleted 100644 → 0
/*
* c 2001 PPC 64 Team, IBM Corp
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/console.h>
#include <net/checksum.h>
#include <asm/processor.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/system.h>
#include <asm/hw_irq.h>
#include <asm/abs_addr.h>
#include <asm/cacheflush.h>
EXPORT_SYMBOL(strcpy);
EXPORT_SYMBOL(strncpy);
EXPORT_SYMBOL(strcat);
EXPORT_SYMBOL(strncat);
EXPORT_SYMBOL(strchr);
EXPORT_SYMBOL(strrchr);
EXPORT_SYMBOL(strpbrk);
EXPORT_SYMBOL(strstr);
EXPORT_SYMBOL(strlen);
EXPORT_SYMBOL(strnlen);
EXPORT_SYMBOL(strcmp);
EXPORT_SYMBOL(strncmp);

EXPORT_SYMBOL(csum_partial);
EXPORT_SYMBOL(csum_partial_copy_generic);
EXPORT_SYMBOL(ip_fast_csum);
EXPORT_SYMBOL(csum_tcpudp_magic);

EXPORT_SYMBOL(__copy_tofrom_user);
EXPORT_SYMBOL(__clear_user);
EXPORT_SYMBOL(__strncpy_from_user);
EXPORT_SYMBOL(__strnlen_user);

EXPORT_SYMBOL(reloc_offset);

EXPORT_SYMBOL(_insb);
EXPORT_SYMBOL(_outsb);
EXPORT_SYMBOL(_insw);
EXPORT_SYMBOL(_outsw);
EXPORT_SYMBOL(_insl);
EXPORT_SYMBOL(_outsl);
EXPORT_SYMBOL(_insw_ns);
EXPORT_SYMBOL(_outsw_ns);
EXPORT_SYMBOL(_insl_ns);
EXPORT_SYMBOL(_outsl_ns);

EXPORT_SYMBOL(kernel_thread);

EXPORT_SYMBOL(giveup_fpu);
#ifdef CONFIG_ALTIVEC
EXPORT_SYMBOL(giveup_altivec);
#endif
EXPORT_SYMBOL(__flush_icache_range);
EXPORT_SYMBOL(flush_dcache_range);

EXPORT_SYMBOL(memcpy);
EXPORT_SYMBOL(memset);
EXPORT_SYMBOL(memmove);
EXPORT_SYMBOL(memscan);
EXPORT_SYMBOL(memcmp);
EXPORT_SYMBOL(memchr);

EXPORT_SYMBOL(timer_interrupt);
EXPORT_SYMBOL(console_drivers);
arch/ppc64/kernel/prom.c deleted 100644 → 0
/*
*
*
* Procedures for interfacing to Open Firmware.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#undef DEBUG
#include <stdarg.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <linux/module.h>
#include <linux/module.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/lmb.h>
#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#include <asm/pSeries_reconfig.h>
#ifdef DEBUG
#define DBG(fmt...) udbg_printf(fmt)
#else
#define DBG(fmt...)
#endif
struct pci_reg_property {
	struct pci_address addr;
	u32 size_hi;
	u32 size_lo;
};

struct isa_reg_property {
	u32 space;
	u32 address;
	u32 size;
};

typedef int interpret_func(struct device_node *, unsigned long *,
			   int, int, int);

extern struct rtas_t rtas;
extern struct lmb lmb;
extern unsigned long klimit;
extern unsigned long memory_limit;

static int __initdata dt_root_addr_cells;
static int __initdata dt_root_size_cells;
static int __initdata iommu_is_off;
int __initdata iommu_force_on;
unsigned long tce_alloc_start, tce_alloc_end;

typedef u32 cell_t;

#if 0
static struct boot_param_header *initial_boot_params __initdata;
#else
struct boot_param_header *initial_boot_params;
#endif

static struct device_node *allnodes = NULL;

/* use when traversing tree through the allnext, child, sibling,
 * or parent members of struct device_node.
 */
static DEFINE_RWLOCK(devtree_lock);

/* export that to outside world */
struct device_node *of_chosen;

/*
 * Wrapper for allocating memory for various data that needs to be
 * attached to device nodes as they are processed at boot or when
 * added to the device tree later (e.g. DLPAR).  At boot there is
 * already a region reserved so we just increment *mem_start by size;
 * otherwise we call kmalloc.
 */
static void *prom_alloc(unsigned long size, unsigned long *mem_start)
{
	unsigned long tmp;

	if (!mem_start)
		return kmalloc(size, GFP_KERNEL);

	tmp = *mem_start;
	*mem_start += size;
	return (void *)tmp;
}

/*
 * Find the device_node with a given phandle.
 */
static struct device_node * find_phandle(phandle ph)
{
	struct device_node *np;

	for (np = allnodes; np != 0; np = np->allnext)
		if (np->linux_phandle == ph)
			return np;
	return NULL;
}

/*
 * Find the interrupt parent of a node.
 */
static struct device_node * __devinit intr_parent(struct device_node *p)
{
	phandle *parp;

	parp = (phandle *) get_property(p, "interrupt-parent", NULL);
	if (parp == NULL)
		return p->parent;
	return find_phandle(*parp);
}

/*
 * Find out the size of each entry of the interrupts property
 * for a node.
 */
int __devinit prom_n_intr_cells(struct device_node *np)
{
	struct device_node *p;
	unsigned int *icp;

	for (p = np; (p = intr_parent(p)) != NULL; ) {
		icp = (unsigned int *)
			get_property(p, "#interrupt-cells", NULL);
		if (icp != NULL)
			return *icp;
		if (get_property(p, "interrupt-controller", NULL) != NULL
		    || get_property(p, "interrupt-map", NULL) != NULL) {
			printk("oops, node %s doesn't have #interrupt-cells\n",
			       p->full_name);
			return 1;
		}
	}
#ifdef DEBUG_IRQ
	printk("prom_n_intr_cells failed for %s\n", np->full_name);
#endif
	return 1;
}
/*
* Map an interrupt from a device up to the platform interrupt
* descriptor.
*/
static
int
__devinit
map_interrupt
(
unsigned
int
**
irq
,
struct
device_node
**
ictrler
,
struct
device_node
*
np
,
unsigned
int
*
ints
,
int
nintrc
)
{
struct
device_node
*
p
,
*
ipar
;
unsigned
int
*
imap
,
*
imask
,
*
ip
;
int
i
,
imaplen
,
match
;
int
newintrc
=
0
,
newaddrc
=
0
;
unsigned
int
*
reg
;
int
naddrc
;
reg
=
(
unsigned
int
*
)
get_property
(
np
,
"reg"
,
NULL
);
naddrc
=
prom_n_addr_cells
(
np
);
p
=
intr_parent
(
np
);
while
(
p
!=
NULL
)
{
if
(
get_property
(
p
,
"interrupt-controller"
,
NULL
)
!=
NULL
)
/* this node is an interrupt controller, stop here */
break
;
imap
=
(
unsigned
int
*
)
get_property
(
p
,
"interrupt-map"
,
&
imaplen
);
if
(
imap
==
NULL
)
{
p
=
intr_parent
(
p
);
continue
;
}
imask
=
(
unsigned
int
*
)
get_property
(
p
,
"interrupt-map-mask"
,
NULL
);
if
(
imask
==
NULL
)
{
printk
(
"oops, %s has interrupt-map but no mask
\n
"
,
p
->
full_name
);
return
0
;
}
imaplen
/=
sizeof
(
unsigned
int
);
match
=
0
;
ipar
=
NULL
;
while
(
imaplen
>
0
&&
!
match
)
{
/* check the child-interrupt field */
match
=
1
;
for
(
i
=
0
;
i
<
naddrc
&&
match
;
++
i
)
match
=
((
reg
[
i
]
^
imap
[
i
])
&
imask
[
i
])
==
0
;
for
(;
i
<
naddrc
+
nintrc
&&
match
;
++
i
)
match
=
((
ints
[
i
-
naddrc
]
^
imap
[
i
])
&
imask
[
i
])
==
0
;
imap
+=
naddrc
+
nintrc
;
imaplen
-=
naddrc
+
nintrc
;
/* grab the interrupt parent */
ipar
=
find_phandle
((
phandle
)
*
imap
++
);
--
imaplen
;
if
(
ipar
==
NULL
)
{
printk
(
"oops, no int parent %x in map of %s
\n
"
,
imap
[
-
1
],
p
->
full_name
);
return
0
;
}
/* find the parent's # addr and intr cells */
ip
=
(
unsigned
int
*
)
get_property
(
ipar
,
"#interrupt-cells"
,
NULL
);
if
(
ip
==
NULL
)
{
printk
(
"oops, no #interrupt-cells on %s
\n
"
,
ipar
->
full_name
);
return
0
;
}
newintrc
=
*
ip
;
ip
=
(
unsigned
int
*
)
get_property
(
ipar
,
"#address-cells"
,
NULL
);
newaddrc
=
(
ip
==
NULL
)
?
0
:
*
ip
;
imap
+=
newaddrc
+
newintrc
;
imaplen
-=
newaddrc
+
newintrc
;
}
if
(
imaplen
<
0
)
{
printk
(
"oops, error decoding int-map on %s, len=%d
\n
"
,
p
->
full_name
,
imaplen
);
return
0
;
}
if
(
!
match
)
{
#ifdef DEBUG_IRQ
printk
(
"oops, no match in %s int-map for %s
\n
"
,
p
->
full_name
,
np
->
full_name
);
#endif
return
0
;
}
p
=
ipar
;
naddrc
=
newaddrc
;
nintrc
=
newintrc
;
ints
=
imap
-
nintrc
;
reg
=
ints
-
naddrc
;
}
if
(
p
==
NULL
)
{
#ifdef DEBUG_IRQ
printk
(
"hmmm, int tree for %s doesn't have ctrler
\n
"
,
np
->
full_name
);
#endif
return
0
;
}
*
irq
=
ints
;
*
ictrler
=
p
;
return
nintrc
;
}
static
int
__devinit
finish_node_interrupts
(
struct
device_node
*
np
,
unsigned
long
*
mem_start
,
int
measure_only
)
{
unsigned
int
*
ints
;
int
intlen
,
intrcells
,
intrcount
;
int
i
,
j
,
n
;
unsigned
int
*
irq
,
virq
;
struct
device_node
*
ic
;
ints
=
(
unsigned
int
*
)
get_property
(
np
,
"interrupts"
,
&
intlen
);
if
(
ints
==
NULL
)
return
0
;
intrcells
=
prom_n_intr_cells
(
np
);
intlen
/=
intrcells
*
sizeof
(
unsigned
int
);
np
->
intrs
=
prom_alloc
(
intlen
*
sizeof
(
*
(
np
->
intrs
)),
mem_start
);
if
(
!
np
->
intrs
)
return
-
ENOMEM
;
if
(
measure_only
)
return
0
;
intrcount
=
0
;
for
(
i
=
0
;
i
<
intlen
;
++
i
,
ints
+=
intrcells
)
{
n
=
map_interrupt
(
&
irq
,
&
ic
,
np
,
ints
,
intrcells
);
if
(
n
<=
0
)
continue
;
/* don't map IRQ numbers under a cascaded 8259 controller */
if
(
ic
&&
device_is_compatible
(
ic
,
"chrp,iic"
))
{
np
->
intrs
[
intrcount
].
line
=
irq
[
0
];
}
else
{
virq
=
virt_irq_create_mapping
(
irq
[
0
]);
if
(
virq
==
NO_IRQ
)
{
printk
(
KERN_CRIT
"Could not allocate interrupt"
" number for %s
\n
"
,
np
->
full_name
);
continue
;
}
np
->
intrs
[
intrcount
].
line
=
irq_offset_up
(
virq
);
}
/* We offset irq numbers for the u3 MPIC by 128 in PowerMac */
if
(
_machine
==
PLATFORM_POWERMAC
&&
ic
&&
ic
->
parent
)
{
char
*
name
=
get_property
(
ic
->
parent
,
"name"
,
NULL
);
if
(
name
&&
!
strcmp
(
name
,
"u3"
))
np
->
intrs
[
intrcount
].
line
+=
128
;
else
if
(
!
(
name
&&
!
strcmp
(
name
,
"mac-io"
)))
/* ignore other cascaded controllers, such as
the k2-sata-root */
break
;
}
np
->
intrs
[
intrcount
].
sense
=
1
;
if
(
n
>
1
)
np
->
intrs
[
intrcount
].
sense
=
irq
[
1
];
if
(
n
>
2
)
{
printk
(
"hmmm, got %d intr cells for %s:"
,
n
,
np
->
full_name
);
for
(
j
=
0
;
j
<
n
;
++
j
)
printk
(
" %d"
,
irq
[
j
]);
printk
(
"
\n
"
);
}
++
intrcount
;
}
np
->
n_intrs
=
intrcount
;
return
0
;
}
static
int
__devinit
interpret_pci_props
(
struct
device_node
*
np
,
unsigned
long
*
mem_start
,
int
naddrc
,
int
nsizec
,
int
measure_only
)
{
struct
address_range
*
adr
;
struct
pci_reg_property
*
pci_addrs
;
int
i
,
l
,
n_addrs
;
pci_addrs
=
(
struct
pci_reg_property
*
)
get_property
(
np
,
"assigned-addresses"
,
&
l
);
if
(
!
pci_addrs
)
return
0
;
n_addrs
=
l
/
sizeof
(
*
pci_addrs
);
adr
=
prom_alloc
(
n_addrs
*
sizeof
(
*
adr
),
mem_start
);
if
(
!
adr
)
return
-
ENOMEM
;
if
(
measure_only
)
return
0
;
np
->
addrs
=
adr
;
np
->
n_addrs
=
n_addrs
;
for
(
i
=
0
;
i
<
n_addrs
;
i
++
)
{
adr
[
i
].
space
=
pci_addrs
[
i
].
addr
.
a_hi
;
adr
[
i
].
address
=
pci_addrs
[
i
].
addr
.
a_lo
|
((
u64
)
pci_addrs
[
i
].
addr
.
a_mid
<<
32
);
adr
[
i
].
size
=
pci_addrs
[
i
].
size_lo
;
}
return
0
;
}
static
int
__init
interpret_dbdma_props
(
struct
device_node
*
np
,
unsigned
long
*
mem_start
,
int
naddrc
,
int
nsizec
,
int
measure_only
)
{
struct
reg_property32
*
rp
;
struct
address_range
*
adr
;
unsigned
long
base_address
;
int
i
,
l
;
struct
device_node
*
db
;
base_address
=
0
;
if
(
!
measure_only
)
{
for
(
db
=
np
->
parent
;
db
!=
NULL
;
db
=
db
->
parent
)
{
if
(
!
strcmp
(
db
->
type
,
"dbdma"
)
&&
db
->
n_addrs
!=
0
)
{
base_address
=
db
->
addrs
[
0
].
address
;
break
;
}
}
}
rp
=
(
struct
reg_property32
*
)
get_property
(
np
,
"reg"
,
&
l
);
if
(
rp
!=
0
&&
l
>=
sizeof
(
struct
reg_property32
))
{
i
=
0
;
adr
=
(
struct
address_range
*
)
(
*
mem_start
);
while
((
l
-=
sizeof
(
struct
reg_property32
))
>=
0
)
{
if
(
!
measure_only
)
{
adr
[
i
].
space
=
2
;
adr
[
i
].
address
=
rp
[
i
].
address
+
base_address
;
adr
[
i
].
size
=
rp
[
i
].
size
;
}
++
i
;
}
np
->
addrs
=
adr
;
np
->
n_addrs
=
i
;
(
*
mem_start
)
+=
i
*
sizeof
(
struct
address_range
);
}
return
0
;
}
static
int
__init
interpret_macio_props
(
struct
device_node
*
np
,
unsigned
long
*
mem_start
,
int
naddrc
,
int
nsizec
,
int
measure_only
)
{
struct
reg_property32
*
rp
;
struct
address_range
*
adr
;
unsigned
long
base_address
;
int
i
,
l
;
struct
device_node
*
db
;
base_address
=
0
;
if
(
!
measure_only
)
{
for
(
db
=
np
->
parent
;
db
!=
NULL
;
db
=
db
->
parent
)
{
if
(
!
strcmp
(
db
->
type
,
"mac-io"
)
&&
db
->
n_addrs
!=
0
)
{
base_address
=
db
->
addrs
[
0
].
address
;
break
;
}
}
}
rp
=
(
struct
reg_property32
*
)
get_property
(
np
,
"reg"
,
&
l
);
if
(
rp
!=
0
&&
l
>=
sizeof
(
struct
reg_property32
))
{
i
=
0
;
adr
=
(
struct
address_range
*
)
(
*
mem_start
);
while
((
l
-=
sizeof
(
struct
reg_property32
))
>=
0
)
{
if
(
!
measure_only
)
{
adr
[
i
].
space
=
2
;
adr
[
i
].
address
=
rp
[
i
].
address
+
base_address
;
adr
[
i
].
size
=
rp
[
i
].
size
;
}
++
i
;
}
np
->
addrs
=
adr
;
np
->
n_addrs
=
i
;
(
*
mem_start
)
+=
i
*
sizeof
(
struct
address_range
);
}
return
0
;
}
static
int
__init
interpret_isa_props
(
struct
device_node
*
np
,
unsigned
long
*
mem_start
,
int
naddrc
,
int
nsizec
,
int
measure_only
)
{
struct
isa_reg_property
*
rp
;
struct
address_range
*
adr
;
int
i
,
l
;
rp
=
(
struct
isa_reg_property
*
)
get_property
(
np
,
"reg"
,
&
l
);
if
(
rp
!=
0
&&
l
>=
sizeof
(
struct
isa_reg_property
))
{
i
=
0
;
adr
=
(
struct
address_range
*
)
(
*
mem_start
);
while
((
l
-=
sizeof
(
struct
isa_reg_property
))
>=
0
)
{
if
(
!
measure_only
)
{
adr
[
i
].
space
=
rp
[
i
].
space
;
adr
[
i
].
address
=
rp
[
i
].
address
;
adr
[
i
].
size
=
rp
[
i
].
size
;
}
++
i
;
}
np
->
addrs
=
adr
;
np
->
n_addrs
=
i
;
(
*
mem_start
)
+=
i
*
sizeof
(
struct
address_range
);
}
return
0
;
}
static
int
__init
interpret_root_props
(
struct
device_node
*
np
,
unsigned
long
*
mem_start
,
int
naddrc
,
int
nsizec
,
int
measure_only
)
{
struct
address_range
*
adr
;
int
i
,
l
;
unsigned
int
*
rp
;
int
rpsize
=
(
naddrc
+
nsizec
)
*
sizeof
(
unsigned
int
);
rp
=
(
unsigned
int
*
)
get_property
(
np
,
"reg"
,
&
l
);
if
(
rp
!=
0
&&
l
>=
rpsize
)
{
i
=
0
;
adr
=
(
struct
address_range
*
)
(
*
mem_start
);
while
((
l
-=
rpsize
)
>=
0
)
{
if
(
!
measure_only
)
{
adr
[
i
].
space
=
0
;
adr
[
i
].
address
=
rp
[
naddrc
-
1
];
adr
[
i
].
size
=
rp
[
naddrc
+
nsizec
-
1
];
}
++
i
;
rp
+=
naddrc
+
nsizec
;
}
np
->
addrs
=
adr
;
np
->
n_addrs
=
i
;
(
*
mem_start
)
+=
i
*
sizeof
(
struct
address_range
);
}
return
0
;
}
static
int
__devinit
finish_node
(
struct
device_node
*
np
,
unsigned
long
*
mem_start
,
interpret_func
*
ifunc
,
int
naddrc
,
int
nsizec
,
int
measure_only
)
{
struct
device_node
*
child
;
int
*
ip
,
rc
=
0
;
/* get the device addresses and interrupts */
if
(
ifunc
!=
NULL
)
rc
=
ifunc
(
np
,
mem_start
,
naddrc
,
nsizec
,
measure_only
);
if
(
rc
)
goto
out
;
rc
=
finish_node_interrupts
(
np
,
mem_start
,
measure_only
);
if
(
rc
)
goto
out
;
/* Look for #address-cells and #size-cells properties. */
ip
=
(
int
*
)
get_property
(
np
,
"#address-cells"
,
NULL
);
if
(
ip
!=
NULL
)
naddrc
=
*
ip
;
ip
=
(
int
*
)
get_property
(
np
,
"#size-cells"
,
NULL
);
if
(
ip
!=
NULL
)
nsizec
=
*
ip
;
if
(
!
strcmp
(
np
->
name
,
"device-tree"
)
||
np
->
parent
==
NULL
)
ifunc
=
interpret_root_props
;
else
if
(
np
->
type
==
0
)
ifunc
=
NULL
;
else
if
(
!
strcmp
(
np
->
type
,
"pci"
)
||
!
strcmp
(
np
->
type
,
"vci"
))
ifunc
=
interpret_pci_props
;
else
if
(
!
strcmp
(
np
->
type
,
"dbdma"
))
ifunc
=
interpret_dbdma_props
;
else
if
(
!
strcmp
(
np
->
type
,
"mac-io"
)
||
ifunc
==
interpret_macio_props
)
ifunc
=
interpret_macio_props
;
else
if
(
!
strcmp
(
np
->
type
,
"isa"
))
ifunc
=
interpret_isa_props
;
else
if
(
!
strcmp
(
np
->
name
,
"uni-n"
)
||
!
strcmp
(
np
->
name
,
"u3"
))
ifunc
=
interpret_root_props
;
else
if
(
!
((
ifunc
==
interpret_dbdma_props
||
ifunc
==
interpret_macio_props
)
&&
(
!
strcmp
(
np
->
type
,
"escc"
)
||
!
strcmp
(
np
->
type
,
"media-bay"
))))
ifunc
=
NULL
;
for
(
child
=
np
->
child
;
child
!=
NULL
;
child
=
child
->
sibling
)
{
rc
=
finish_node
(
child
,
mem_start
,
ifunc
,
naddrc
,
nsizec
,
measure_only
);
if
(
rc
)
goto
out
;
}
out:
return
rc
;
}
/**
* finish_device_tree is called once things are running normally
* (i.e. with text and data mapped to the address they were linked at).
* It traverses the device tree and fills in some of the additional,
* fields in each node like {n_}addrs and {n_}intrs, the virt interrupt
* mapping is also initialized at this point.
*/
void
__init
finish_device_tree
(
void
)
{
unsigned
long
start
,
end
,
size
=
0
;
DBG
(
" -> finish_device_tree
\n
"
);
if
(
ppc64_interrupt_controller
==
IC_INVALID
)
{
DBG
(
"failed to configure interrupt controller type
\n
"
);
panic
(
"failed to configure interrupt controller type
\n
"
);
}
/* Initialize virtual IRQ map */
virt_irq_init
();
/*
* Finish device-tree (pre-parsing some properties etc...)
* We do this in 2 passes. One with "measure_only" set, which
* will only measure the amount of memory needed, then we can
* allocate that memory, and call finish_node again. However,
* we must be careful as most routines will fail nowadays when
* prom_alloc() returns 0, so we must make sure our first pass
* doesn't start at 0. We pre-initialize size to 16 for that
* reason and then remove those additional 16 bytes
*/
size
=
16
;
finish_node
(
allnodes
,
&
size
,
NULL
,
0
,
0
,
1
);
size
-=
16
;
end
=
start
=
(
unsigned
long
)
abs_to_virt
(
lmb_alloc
(
size
,
128
));
finish_node
(
allnodes
,
&
end
,
NULL
,
0
,
0
,
0
);
BUG_ON
(
end
!=
start
+
size
);
DBG
(
" <- finish_device_tree
\n
"
);
}
#ifdef DEBUG
#define printk udbg_printf
#endif
static
inline
char
*
find_flat_dt_string
(
u32
offset
)
{
return
((
char
*
)
initial_boot_params
)
+
initial_boot_params
->
off_dt_strings
+
offset
;
}
/**
* This function is used to scan the flattened device-tree, it is
* used to extract the memory informations at boot before we can
* unflatten the tree
*/
int
__init
of_scan_flat_dt
(
int
(
*
it
)(
unsigned
long
node
,
const
char
*
uname
,
int
depth
,
void
*
data
),
void
*
data
)
{
unsigned
long
p
=
((
unsigned
long
)
initial_boot_params
)
+
initial_boot_params
->
off_dt_struct
;
int
rc
=
0
;
int
depth
=
-
1
;
do
{
u32
tag
=
*
((
u32
*
)
p
);
char
*
pathp
;
p
+=
4
;
if
(
tag
==
OF_DT_END_NODE
)
{
depth
--
;
continue
;
}
if
(
tag
==
OF_DT_NOP
)
continue
;
if
(
tag
==
OF_DT_END
)
break
;
if
(
tag
==
OF_DT_PROP
)
{
u32
sz
=
*
((
u32
*
)
p
);
p
+=
8
;
if
(
initial_boot_params
->
version
<
0x10
)
p
=
_ALIGN
(
p
,
sz
>=
8
?
8
:
4
);
p
+=
sz
;
p
=
_ALIGN
(
p
,
4
);
continue
;
}
if
(
tag
!=
OF_DT_BEGIN_NODE
)
{
printk
(
KERN_WARNING
"Invalid tag %x scanning flattened"
" device tree !
\n
"
,
tag
);
return
-
EINVAL
;
}
depth
++
;
pathp
=
(
char
*
)
p
;
p
=
_ALIGN
(
p
+
strlen
(
pathp
)
+
1
,
4
);
if
((
*
pathp
)
==
'/'
)
{
char
*
lp
,
*
np
;
for
(
lp
=
NULL
,
np
=
pathp
;
*
np
;
np
++
)
if
((
*
np
)
==
'/'
)
lp
=
np
+
1
;
if
(
lp
!=
NULL
)
pathp
=
lp
;
}
rc
=
it
(
p
,
pathp
,
depth
,
data
);
if
(
rc
!=
0
)
break
;
}
while
(
1
);
return
rc
;
}
/**
* This function can be used within scan_flattened_dt callback to get
* access to properties
*/
void
*
__init
of_get_flat_dt_prop
(
unsigned
long
node
,
const
char
*
name
,
unsigned
long
*
size
)
{
unsigned
long
p
=
node
;
do
{
u32
tag
=
*
((
u32
*
)
p
);
u32
sz
,
noff
;
const
char
*
nstr
;
p
+=
4
;
if
(
tag
==
OF_DT_NOP
)
continue
;
if
(
tag
!=
OF_DT_PROP
)
return
NULL
;
sz
=
*
((
u32
*
)
p
);
noff
=
*
((
u32
*
)(
p
+
4
));
p
+=
8
;
if
(
initial_boot_params
->
version
<
0x10
)
p
=
_ALIGN
(
p
,
sz
>=
8
?
8
:
4
);
nstr
=
find_flat_dt_string
(
noff
);
if
(
nstr
==
NULL
)
{
printk
(
KERN_WARNING
"Can't find property index"
" name !
\n
"
);
return
NULL
;
}
if
(
strcmp
(
name
,
nstr
)
==
0
)
{
if
(
size
)
*
size
=
sz
;
return
(
void
*
)
p
;
}
p
+=
sz
;
p
=
_ALIGN
(
p
,
4
);
}
while
(
1
);
}
static
void
*
__init
unflatten_dt_alloc
(
unsigned
long
*
mem
,
unsigned
long
size
,
unsigned
long
align
)
{
void
*
res
;
*
mem
=
_ALIGN
(
*
mem
,
align
);
res
=
(
void
*
)
*
mem
;
*
mem
+=
size
;
return
res
;
}
static
unsigned
long
__init
unflatten_dt_node
(
unsigned
long
mem
,
unsigned
long
*
p
,
struct
device_node
*
dad
,
struct
device_node
***
allnextpp
,
unsigned
long
fpsize
)
{
struct
device_node
*
np
;
struct
property
*
pp
,
**
prev_pp
=
NULL
;
char
*
pathp
;
u32
tag
;
unsigned
int
l
,
allocl
;
int
has_name
=
0
;
int
new_format
=
0
;
tag
=
*
((
u32
*
)(
*
p
));
if
(
tag
!=
OF_DT_BEGIN_NODE
)
{
printk
(
"Weird tag at start of node: %x
\n
"
,
tag
);
return
mem
;
}
*
p
+=
4
;
pathp
=
(
char
*
)
*
p
;
l
=
allocl
=
strlen
(
pathp
)
+
1
;
*
p
=
_ALIGN
(
*
p
+
l
,
4
);
/* version 0x10 has a more compact unit name here instead of the full
* path. we accumulate the full path size using "fpsize", we'll rebuild
* it later. We detect this because the first character of the name is
* not '/'.
*/
if
((
*
pathp
)
!=
'/'
)
{
new_format
=
1
;
if
(
fpsize
==
0
)
{
/* root node: special case. fpsize accounts for path
* plus terminating zero. root node only has '/', so
* fpsize should be 2, but we want to avoid the first
* level nodes to have two '/' so we use fpsize 1 here
*/
fpsize
=
1
;
allocl
=
2
;
}
else
{
/* account for '/' and path size minus terminal 0
* already in 'l'
*/
fpsize
+=
l
;
allocl
=
fpsize
;
}
}
np
=
unflatten_dt_alloc
(
&
mem
,
sizeof
(
struct
device_node
)
+
allocl
,
__alignof__
(
struct
device_node
));
if
(
allnextpp
)
{
memset
(
np
,
0
,
sizeof
(
*
np
));
np
->
full_name
=
((
char
*
)
np
)
+
sizeof
(
struct
device_node
);
if
(
new_format
)
{
char
*
p
=
np
->
full_name
;
/* rebuild full path for new format */
if
(
dad
&&
dad
->
parent
)
{
strcpy
(
p
,
dad
->
full_name
);
#ifdef DEBUG
if
((
strlen
(
p
)
+
l
+
1
)
!=
allocl
)
{
DBG
(
"%s: p: %d, l: %d, a: %d
\n
"
,
pathp
,
strlen
(
p
),
l
,
allocl
);
}
#endif
p
+=
strlen
(
p
);
}
*
(
p
++
)
=
'/'
;
memcpy
(
p
,
pathp
,
l
);
}
else
memcpy
(
np
->
full_name
,
pathp
,
l
);
prev_pp
=
&
np
->
properties
;
**
allnextpp
=
np
;
*
allnextpp
=
&
np
->
allnext
;
if
(
dad
!=
NULL
)
{
np
->
parent
=
dad
;
/* we temporarily use the next field as `last_child'*/
if
(
dad
->
next
==
0
)
dad
->
child
=
np
;
else
dad
->
next
->
sibling
=
np
;
dad
->
next
=
np
;
}
kref_init
(
&
np
->
kref
);
}
while
(
1
)
{
u32
sz
,
noff
;
char
*
pname
;
tag
=
*
((
u32
*
)(
*
p
));
if
(
tag
==
OF_DT_NOP
)
{
*
p
+=
4
;
continue
;
}
if
(
tag
!=
OF_DT_PROP
)
break
;
*
p
+=
4
;
sz
=
*
((
u32
*
)(
*
p
));
noff
=
*
((
u32
*
)((
*
p
)
+
4
));
*
p
+=
8
;
if
(
initial_boot_params
->
version
<
0x10
)
*
p
=
_ALIGN
(
*
p
,
sz
>=
8
?
8
:
4
);
pname
=
find_flat_dt_string
(
noff
);
if
(
pname
==
NULL
)
{
printk
(
"Can't find property name in list !
\n
"
);
break
;
}
if
(
strcmp
(
pname
,
"name"
)
==
0
)
has_name
=
1
;
l
=
strlen
(
pname
)
+
1
;
pp
=
unflatten_dt_alloc
(
&
mem
,
sizeof
(
struct
property
),
__alignof__
(
struct
property
));
if
(
allnextpp
)
{
if
(
strcmp
(
pname
,
"linux,phandle"
)
==
0
)
{
np
->
node
=
*
((
u32
*
)
*
p
);
if
(
np
->
linux_phandle
==
0
)
np
->
linux_phandle
=
np
->
node
;
}
if
(
strcmp
(
pname
,
"ibm,phandle"
)
==
0
)
np
->
linux_phandle
=
*
((
u32
*
)
*
p
);
pp
->
name
=
pname
;
pp
->
length
=
sz
;
pp
->
value
=
(
void
*
)
*
p
;
*
prev_pp
=
pp
;
prev_pp
=
&
pp
->
next
;
}
*
p
=
_ALIGN
((
*
p
)
+
sz
,
4
);
}
/* with version 0x10 we may not have the name property, recreate
* it here from the unit name if absent
*/
if
(
!
has_name
)
{
char
*
p
=
pathp
,
*
ps
=
pathp
,
*
pa
=
NULL
;
int
sz
;
while
(
*
p
)
{
if
((
*
p
)
==
'@'
)
pa
=
p
;
if
((
*
p
)
==
'/'
)
ps
=
p
+
1
;
p
++
;
}
if
(
pa
<
ps
)
pa
=
p
;
sz
=
(
pa
-
ps
)
+
1
;
pp
=
unflatten_dt_alloc
(
&
mem
,
sizeof
(
struct
property
)
+
sz
,
__alignof__
(
struct
property
));
if
(
allnextpp
)
{
pp
->
name
=
"name"
;
pp
->
length
=
sz
;
pp
->
value
=
(
unsigned
char
*
)(
pp
+
1
);
*
prev_pp
=
pp
;
prev_pp
=
&
pp
->
next
;
memcpy
(
pp
->
value
,
ps
,
sz
-
1
);
((
char
*
)
pp
->
value
)[
sz
-
1
]
=
0
;
DBG
(
"fixed up name for %s -> %s
\n
"
,
pathp
,
pp
->
value
);
}
}
if
(
allnextpp
)
{
*
prev_pp
=
NULL
;
np
->
name
=
get_property
(
np
,
"name"
,
NULL
);
np
->
type
=
get_property
(
np
,
"device_type"
,
NULL
);
if
(
!
np
->
name
)
np
->
name
=
"<NULL>"
;
if
(
!
np
->
type
)
np
->
type
=
"<NULL>"
;
}
while
(
tag
==
OF_DT_BEGIN_NODE
)
{
mem
=
unflatten_dt_node
(
mem
,
p
,
np
,
allnextpp
,
fpsize
);
tag
=
*
((
u32
*
)(
*
p
));
}
if
(
tag
!=
OF_DT_END_NODE
)
{
printk
(
"Weird tag at end of node: %x
\n
"
,
tag
);
return
mem
;
}
*
p
+=
4
;
return
mem
;
}
/**
* unflattens the device-tree passed by the firmware, creating the
* tree of struct device_node. It also fills the "name" and "type"
* pointers of the nodes so the normal device-tree walking functions
* can be used (this used to be done by finish_device_tree)
*/
void
__init
unflatten_device_tree
(
void
)
{
unsigned
long
start
,
mem
,
size
;
struct
device_node
**
allnextp
=
&
allnodes
;
char
*
p
=
NULL
;
int
l
=
0
;
DBG
(
" -> unflatten_device_tree()
\n
"
);
/* First pass, scan for size */
start
=
((
unsigned
long
)
initial_boot_params
)
+
initial_boot_params
->
off_dt_struct
;
size
=
unflatten_dt_node
(
0
,
&
start
,
NULL
,
NULL
,
0
);
size
=
(
size
|
3
)
+
1
;
DBG
(
" size is %lx, allocating...
\n
"
,
size
);
/* Allocate memory for the expanded device tree */
mem
=
lmb_alloc
(
size
+
4
,
__alignof__
(
struct
device_node
));
if
(
!
mem
)
{
DBG
(
"Couldn't allocate memory with lmb_alloc()!
\n
"
);
panic
(
"Couldn't allocate memory with lmb_alloc()!
\n
"
);
}
mem
=
(
unsigned
long
)
abs_to_virt
(
mem
);
((
u32
*
)
mem
)[
size
/
4
]
=
0xdeadbeef
;
DBG
(
" unflattening...
\n
"
,
mem
);
/* Second pass, do actual unflattening */
start
=
((
unsigned
long
)
initial_boot_params
)
+
initial_boot_params
->
off_dt_struct
;
unflatten_dt_node
(
mem
,
&
start
,
NULL
,
&
allnextp
,
0
);
if
(
*
((
u32
*
)
start
)
!=
OF_DT_END
)
printk
(
KERN_WARNING
"Weird tag at end of tree: %08x
\n
"
,
*
((
u32
*
)
start
));
if
(((
u32
*
)
mem
)[
size
/
4
]
!=
0xdeadbeef
)
printk
(
KERN_WARNING
"End of tree marker overwritten: %08x
\n
"
,
((
u32
*
)
mem
)[
size
/
4
]
);
*
allnextp
=
NULL
;
/* Get pointer to OF "/chosen" node for use everywhere */
of_chosen
=
of_find_node_by_path
(
"/chosen"
);
/* Retreive command line */
if
(
of_chosen
!=
NULL
)
{
p
=
(
char
*
)
get_property
(
of_chosen
,
"bootargs"
,
&
l
);
if
(
p
!=
NULL
&&
l
>
0
)
strlcpy
(
cmd_line
,
p
,
min
(
l
,
COMMAND_LINE_SIZE
));
}
#ifdef CONFIG_CMDLINE
if
(
l
==
0
||
(
l
==
1
&&
(
*
p
)
==
0
))
strlcpy
(
cmd_line
,
CONFIG_CMDLINE
,
COMMAND_LINE_SIZE
);
#endif
/* CONFIG_CMDLINE */
DBG
(
"Command line is: %s
\n
"
,
cmd_line
);
DBG
(
" <- unflatten_device_tree()
\n
"
);
}
static
int
__init
early_init_dt_scan_cpus
(
unsigned
long
node
,
const
char
*
uname
,
int
depth
,
void
*
data
)
{
char
*
type
=
of_get_flat_dt_prop
(
node
,
"device_type"
,
NULL
);
u32
*
prop
;
unsigned
long
size
;
/* We are scanning "cpu" nodes only */
if
(
type
==
NULL
||
strcmp
(
type
,
"cpu"
)
!=
0
)
return
0
;
if
(
initial_boot_params
&&
initial_boot_params
->
version
>=
2
)
{
/* version 2 of the kexec param format adds the phys cpuid
* of booted proc.
*/
boot_cpuid_phys
=
initial_boot_params
->
boot_cpuid_phys
;
boot_cpuid
=
0
;
}
else
{
/* Check if it's the boot-cpu, set it's hw index in paca now */
if
(
of_get_flat_dt_prop
(
node
,
"linux,boot-cpu"
,
NULL
)
!=
NULL
)
{
u32
*
prop
=
of_get_flat_dt_prop
(
node
,
"reg"
,
NULL
);
set_hard_smp_processor_id
(
0
,
prop
==
NULL
?
0
:
*
prop
);
boot_cpuid_phys
=
get_hard_smp_processor_id
(
0
);
}
}
#ifdef CONFIG_ALTIVEC
/* Check if we have a VMX and eventually update CPU features */
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"ibm,vmx"
,
NULL
);
if
(
prop
&&
(
*
prop
)
>
0
)
{
cur_cpu_spec
->
cpu_features
|=
CPU_FTR_ALTIVEC
;
cur_cpu_spec
->
cpu_user_features
|=
PPC_FEATURE_HAS_ALTIVEC
;
}
/* Same goes for Apple's "altivec" property */
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"altivec"
,
NULL
);
if
(
prop
)
{
cur_cpu_spec
->
cpu_features
|=
CPU_FTR_ALTIVEC
;
cur_cpu_spec
->
cpu_user_features
|=
PPC_FEATURE_HAS_ALTIVEC
;
}
#endif
/* CONFIG_ALTIVEC */
/*
* Check for an SMT capable CPU and set the CPU feature. We do
* this by looking at the size of the ibm,ppc-interrupt-server#s
* property
*/
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"ibm,ppc-interrupt-server#s"
,
&
size
);
cur_cpu_spec
->
cpu_features
&=
~
CPU_FTR_SMT
;
if
(
prop
&&
((
size
/
sizeof
(
u32
))
>
1
))
cur_cpu_spec
->
cpu_features
|=
CPU_FTR_SMT
;
return
0
;
}
static
int
__init
early_init_dt_scan_chosen
(
unsigned
long
node
,
const
char
*
uname
,
int
depth
,
void
*
data
)
{
u32
*
prop
;
u64
*
prop64
;
DBG
(
"search
\"
chosen
\"
, depth: %d, uname: %s
\n
"
,
depth
,
uname
);
if
(
depth
!=
1
||
strcmp
(
uname
,
"chosen"
)
!=
0
)
return
0
;
/* get platform type */
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"linux,platform"
,
NULL
);
if
(
prop
==
NULL
)
return
0
;
_machine
=
*
prop
;
/* check if iommu is forced on or off */
if
(
of_get_flat_dt_prop
(
node
,
"linux,iommu-off"
,
NULL
)
!=
NULL
)
iommu_is_off
=
1
;
if
(
of_get_flat_dt_prop
(
node
,
"linux,iommu-force-on"
,
NULL
)
!=
NULL
)
iommu_force_on
=
1
;
prop64
=
(
u64
*
)
of_get_flat_dt_prop
(
node
,
"linux,memory-limit"
,
NULL
);
if
(
prop64
)
memory_limit
=
*
prop64
;
prop64
=
(
u64
*
)
of_get_flat_dt_prop
(
node
,
"linux,tce-alloc-start"
,
NULL
);
if
(
prop64
)
tce_alloc_start
=
*
prop64
;
prop64
=
(
u64
*
)
of_get_flat_dt_prop
(
node
,
"linux,tce-alloc-end"
,
NULL
);
if
(
prop64
)
tce_alloc_end
=
*
prop64
;
#ifdef CONFIG_PPC_RTAS
/* To help early debugging via the front panel, we retreive a minimal
* set of RTAS infos now if available
*/
{
u64
*
basep
,
*
entryp
;
basep
=
(
u64
*
)
of_get_flat_dt_prop
(
node
,
"linux,rtas-base"
,
NULL
);
entryp
=
(
u64
*
)
of_get_flat_dt_prop
(
node
,
"linux,rtas-entry"
,
NULL
);
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"linux,rtas-size"
,
NULL
);
if
(
basep
&&
entryp
&&
prop
)
{
rtas
.
base
=
*
basep
;
rtas
.
entry
=
*
entryp
;
rtas
.
size
=
*
prop
;
}
}
#endif
/* CONFIG_PPC_RTAS */
/* break now */
return
1
;
}
static
int
__init
early_init_dt_scan_root
(
unsigned
long
node
,
const
char
*
uname
,
int
depth
,
void
*
data
)
{
u32
*
prop
;
if
(
depth
!=
0
)
return
0
;
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"#size-cells"
,
NULL
);
dt_root_size_cells
=
(
prop
==
NULL
)
?
1
:
*
prop
;
DBG
(
"dt_root_size_cells = %x
\n
"
,
dt_root_size_cells
);
prop
=
(
u32
*
)
of_get_flat_dt_prop
(
node
,
"#address-cells"
,
NULL
);
dt_root_addr_cells
=
(
prop
==
NULL
)
?
2
:
*
prop
;
DBG
(
"dt_root_addr_cells = %x
\n
"
,
dt_root_addr_cells
);
/* break now */
return
1
;
}
static
unsigned
long
__init
dt_mem_next_cell
(
int
s
,
cell_t
**
cellp
)
{
cell_t
*
p
=
*
cellp
;
unsigned
long
r
=
0
;
/* Ignore more than 2 cells */
while
(
s
>
2
)
{
p
++
;
s
--
;
}
while
(
s
)
{
r
<<=
32
;
r
|=
*
(
p
++
);
s
--
;
}
*
cellp
=
p
;
return
r
;
}
static
int
__init
early_init_dt_scan_memory
(
unsigned
long
node
,
const
char
*
uname
,
int
depth
,
void
*
data
)
{
char
*
type
=
of_get_flat_dt_prop
(
node
,
"device_type"
,
NULL
);
cell_t
*
reg
,
*
endp
;
unsigned
long
l
;
/* We are scanning "memory" nodes only */
if
(
type
==
NULL
||
strcmp
(
type
,
"memory"
)
!=
0
)
return
0
;
reg
=
(
cell_t
*
)
of_get_flat_dt_prop
(
node
,
"reg"
,
&
l
);
if
(
reg
==
NULL
)
return
0
;
endp
=
reg
+
(
l
/
sizeof
(
cell_t
));
DBG
(
"memory scan node %s ..., reg size %ld, data: %x %x %x %x, ...
\n
"
,
uname
,
l
,
reg
[
0
],
reg
[
1
],
reg
[
2
],
reg
[
3
]);
while
((
endp
-
reg
)
>=
(
dt_root_addr_cells
+
dt_root_size_cells
))
{
unsigned
long
base
,
size
;
base
=
dt_mem_next_cell
(
dt_root_addr_cells
,
&
reg
);
size
=
dt_mem_next_cell
(
dt_root_size_cells
,
&
reg
);
if
(
size
==
0
)
continue
;
DBG
(
" - %lx , %lx
\n
"
,
base
,
size
);
if
(
iommu_is_off
)
{
if
(
base
>=
0x80000000ul
)
continue
;
if
((
base
+
size
)
>
0x80000000ul
)
size
=
0x80000000ul
-
base
;
}
lmb_add
(
base
,
size
);
}
return
0
;
}
static
void
__init
early_reserve_mem
(
void
)
{
u64
base
,
size
;
u64
*
reserve_map
=
(
u64
*
)(((
unsigned
long
)
initial_boot_params
)
+
initial_boot_params
->
off_mem_rsvmap
);
while
(
1
)
{
base
=
*
(
reserve_map
++
);
size
=
*
(
reserve_map
++
);
if
(
size
==
0
)
break
;
DBG
(
"reserving: %lx -> %lx
\n
"
,
base
,
size
);
lmb_reserve
(
base
,
size
);
}
#if 0
DBG("memory reserved, lmbs :\n");
lmb_dump_all();
#endif
}
void
__init
early_init_devtree
(
void
*
params
)
{
DBG
(
" -> early_init_devtree()
\n
"
);
/* Setup flat device-tree pointer */
initial_boot_params
=
params
;
/* Retreive various informations from the /chosen node of the
* device-tree, including the platform type, initrd location and
* size, TCE reserve, and more ...
*/
of_scan_flat_dt
(
early_init_dt_scan_chosen
,
NULL
);
/* Scan memory nodes and rebuild LMBs */
lmb_init
();
of_scan_flat_dt
(
early_init_dt_scan_root
,
NULL
);
of_scan_flat_dt
(
early_init_dt_scan_memory
,
NULL
);
lmb_enforce_memory_limit
(
memory_limit
);
lmb_analyze
();
lmb_reserve
(
0
,
__pa
(
klimit
));
/* Reserve LMB regions used by kernel, initrd, dt, etc... */
early_reserve_mem
();
DBG
(
"Scanning CPUs ...
\n
"
);
/* Retreive hash table size from flattened tree plus other
* CPU related informations (altivec support, boot CPU ID, ...)
*/
of_scan_flat_dt
(
early_init_dt_scan_cpus
,
NULL
);
DBG
(
" <- early_init_devtree()
\n
"
);
}
#undef printk

int prom_n_addr_cells(struct device_node *np)
{
	int *ip;

	do {
		if (np->parent)
			np = np->parent;
		ip = (int *)get_property(np, "#address-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #address-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL_GPL(prom_n_addr_cells);

int prom_n_size_cells(struct device_node *np)
{
	int *ip;

	do {
		if (np->parent)
			np = np->parent;
		ip = (int *)get_property(np, "#size-cells", NULL);
		if (ip != NULL)
			return *ip;
	} while (np->parent);
	/* No #size-cells property for the root node, default to 1 */
	return 1;
}
EXPORT_SYMBOL_GPL(prom_n_size_cells);
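A caller typically uses these two helpers to learn how many 32-bit cells make up each (address, size) pair before walking a node's "reg" property. The helper below is a hypothetical illustration of that, not part of this file; it only relies on get_property(), prom_n_addr_cells() and prom_n_size_cells(), which are all defined here.

/* Hypothetical helper: how many (address, size) entries does "reg" hold? */
static int count_reg_entries(struct device_node *np)
{
	int naddr = prom_n_addr_cells(np);	/* cells per address */
	int nsize = prom_n_size_cells(np);	/* cells per size */
	int len;
	int *reg = (int *)get_property(np, "reg", &len);

	if (reg == NULL || naddr + nsize == 0)
		return 0;
	/* len is in bytes, each cell is 4 bytes wide */
	return (len / 4) / (naddr + nsize);
}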
/**
 * Work out the sense (active-low level / active-high edge)
 * of each interrupt from the device tree.
 */
void __init prom_get_irq_senses(unsigned char *senses, int off, int max)
{
	struct device_node *np;
	int i, j;

	/* default to level-triggered */
	memset(senses, 1, max - off);

	for (np = allnodes; np != 0; np = np->allnext) {
		for (j = 0; j < np->n_intrs; j++) {
			i = np->intrs[j].line;
			if (i >= off && i < max)
				senses[i - off] = np->intrs[j].sense ?
					IRQ_SENSE_LEVEL | IRQ_POLARITY_NEGATIVE :
					IRQ_SENSE_EDGE | IRQ_POLARITY_POSITIVE;
		}
	}
}
/**
* Construct and return a list of the device_nodes with a given name.
*/
struct
device_node
*
find_devices
(
const
char
*
name
)
{
struct
device_node
*
head
,
**
prevp
,
*
np
;
prevp
=
&
head
;
for
(
np
=
allnodes
;
np
!=
0
;
np
=
np
->
allnext
)
{
if
(
np
->
name
!=
0
&&
strcasecmp
(
np
->
name
,
name
)
==
0
)
{
*
prevp
=
np
;
prevp
=
&
np
->
next
;
}
}
*
prevp
=
NULL
;
return
head
;
}
EXPORT_SYMBOL
(
find_devices
);
/**
* Construct and return a list of the device_nodes with a given type.
*/
struct
device_node
*
find_type_devices
(
const
char
*
type
)
{
struct
device_node
*
head
,
**
prevp
,
*
np
;
prevp
=
&
head
;
for
(
np
=
allnodes
;
np
!=
0
;
np
=
np
->
allnext
)
{
if
(
np
->
type
!=
0
&&
strcasecmp
(
np
->
type
,
type
)
==
0
)
{
*
prevp
=
np
;
prevp
=
&
np
->
next
;
}
}
*
prevp
=
NULL
;
return
head
;
}
EXPORT_SYMBOL
(
find_type_devices
);
/**
* Returns all nodes linked together
*/
struct
device_node
*
find_all_nodes
(
void
)
{
struct
device_node
*
head
,
**
prevp
,
*
np
;
prevp
=
&
head
;
for
(
np
=
allnodes
;
np
!=
0
;
np
=
np
->
allnext
)
{
*
prevp
=
np
;
prevp
=
&
np
->
next
;
}
*
prevp
=
NULL
;
return
head
;
}
EXPORT_SYMBOL
(
find_all_nodes
);
/** Checks if the given "compat" string matches one of the strings in
 * the device's "compatible" property
 */
int device_is_compatible(struct device_node *device, const char *compat)
{
	const char *cp;
	int cplen, l;

	cp = (char *)get_property(device, "compatible", &cplen);
	if (cp == NULL)
		return 0;
	while (cplen > 0) {
		if (strncasecmp(cp, compat, strlen(compat)) == 0)
			return 1;
		l = strlen(cp) + 1;
		cp += l;
		cplen -= l;
	}

	return 0;
}
EXPORT_SYMBOL(device_is_compatible);
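The "compatible" property is a list of NUL-separated strings, so one device can advertise both an exact model string and a more generic family string; device_is_compatible() walks that list. A small usage sketch follows; the compatible strings in it are made up for illustration.

/* Hypothetical probe-time check: accept the exact model string or fall
 * back to the generic family string from the device tree. */
static int my_driver_matches(struct device_node *np)
{
	if (device_is_compatible(np, "ibm,example-phb-v2"))
		return 1;	/* exact model match */
	if (device_is_compatible(np, "ibm,example-phb"))
		return 1;	/* generic family match */
	return 0;
}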
/**
* Indicates whether the root node has a given value in its
* compatible property.
*/
int
machine_is_compatible
(
const
char
*
compat
)
{
struct
device_node
*
root
;
int
rc
=
0
;
root
=
of_find_node_by_path
(
"/"
);
if
(
root
)
{
rc
=
device_is_compatible
(
root
,
compat
);
of_node_put
(
root
);
}
return
rc
;
}
EXPORT_SYMBOL
(
machine_is_compatible
);
/**
* Construct and return a list of the device_nodes with a given type
* and compatible property.
*/
struct
device_node
*
find_compatible_devices
(
const
char
*
type
,
const
char
*
compat
)
{
struct
device_node
*
head
,
**
prevp
,
*
np
;
prevp
=
&
head
;
for
(
np
=
allnodes
;
np
!=
0
;
np
=
np
->
allnext
)
{
if
(
type
!=
NULL
&&
!
(
np
->
type
!=
0
&&
strcasecmp
(
np
->
type
,
type
)
==
0
))
continue
;
if
(
device_is_compatible
(
np
,
compat
))
{
*
prevp
=
np
;
prevp
=
&
np
->
next
;
}
}
*
prevp
=
NULL
;
return
head
;
}
EXPORT_SYMBOL
(
find_compatible_devices
);
/**
* Find the device_node with a given full_name.
*/
struct
device_node
*
find_path_device
(
const
char
*
path
)
{
struct
device_node
*
np
;
for
(
np
=
allnodes
;
np
!=
0
;
np
=
np
->
allnext
)
if
(
np
->
full_name
!=
0
&&
strcasecmp
(
np
->
full_name
,
path
)
==
0
)
return
np
;
return
NULL
;
}
EXPORT_SYMBOL
(
find_path_device
);
/*******
 *
 * New implementation of the OF "find" APIs, return a refcounted
 * object, call of_node_put() when done. The device tree and list
 * are protected by a rw_lock.
 *
 * Note that property management will need some locking as well,
 * this isn't dealt with yet.
 *
 *******/

/**
 *	of_find_node_by_name - Find a node by its "name" property
 *	@from:	The node to start searching from or NULL, the node
 *		you pass will not be searched, only the next one
 *		will; typically, you pass what the previous call
 *		returned. of_node_put() will be called on it
 *	@name:	The name string to match against
 *
 *	Returns a node pointer with refcount incremented, use
 *	of_node_put() on it when done.
 */
struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name)
{
	struct device_node *np;

	read_lock(&devtree_lock);
	np = from ? from->allnext : allnodes;
	for (; np != 0; np = np->allnext)
		if (np->name != 0 && strcasecmp(np->name, name) == 0
		    && of_node_get(np))
			break;
	if (from)
		of_node_put(from);
	read_unlock(&devtree_lock);
	return np;
}
EXPORT_SYMBOL(of_find_node_by_name);
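Because each call drops the reference on the node it was handed and returns the next match with its refcount already raised, the whole of_find_* family is meant to be used in a "pass back what you got" loop, as the comment above describes. A minimal sketch of that pattern; the node name used is a made-up example.

/* Iterate every node named "ethernet" (hypothetical name), holding exactly
 * one reference at a time: the next call releases the previous node. */
static void example_iterate_by_name(void)
{
	struct device_node *np = NULL;

	while ((np = of_find_node_by_name(np, "ethernet")) != NULL) {
		/* ... use np here; take of_node_get() before caching it ... */
	}
	/* np is NULL on exit, so there is nothing left to of_node_put(). */
}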
/**
* of_find_node_by_type - Find a node by its "device_type" property
* @from: The node to start searching from or NULL, the node
* you pass will not be searched, only the next one
* will; typically, you pass what the previous call
* returned. of_node_put() will be called on it
* @name: The type string to match against
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct
device_node
*
of_find_node_by_type
(
struct
device_node
*
from
,
const
char
*
type
)
{
struct
device_node
*
np
;
read_lock
(
&
devtree_lock
);
np
=
from
?
from
->
allnext
:
allnodes
;
for
(;
np
!=
0
;
np
=
np
->
allnext
)
if
(
np
->
type
!=
0
&&
strcasecmp
(
np
->
type
,
type
)
==
0
&&
of_node_get
(
np
))
break
;
if
(
from
)
of_node_put
(
from
);
read_unlock
(
&
devtree_lock
);
return
np
;
}
EXPORT_SYMBOL
(
of_find_node_by_type
);
/**
* of_find_compatible_node - Find a node based on type and one of the
* tokens in its "compatible" property
* @from: The node to start searching from or NULL, the node
* you pass will not be searched, only the next one
* will; typically, you pass what the previous call
* returned. of_node_put() will be called on it
* @type: The type string to match "device_type" or NULL to ignore
* @compatible: The string to match to one of the tokens in the device
* "compatible" list.
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct
device_node
*
of_find_compatible_node
(
struct
device_node
*
from
,
const
char
*
type
,
const
char
*
compatible
)
{
struct
device_node
*
np
;
read_lock
(
&
devtree_lock
);
np
=
from
?
from
->
allnext
:
allnodes
;
for
(;
np
!=
0
;
np
=
np
->
allnext
)
{
if
(
type
!=
NULL
&&
!
(
np
->
type
!=
0
&&
strcasecmp
(
np
->
type
,
type
)
==
0
))
continue
;
if
(
device_is_compatible
(
np
,
compatible
)
&&
of_node_get
(
np
))
break
;
}
if
(
from
)
of_node_put
(
from
);
read_unlock
(
&
devtree_lock
);
return
np
;
}
EXPORT_SYMBOL
(
of_find_compatible_node
);
/**
* of_find_node_by_path - Find a node matching a full OF path
* @path: The full path to match
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct
device_node
*
of_find_node_by_path
(
const
char
*
path
)
{
struct
device_node
*
np
=
allnodes
;
read_lock
(
&
devtree_lock
);
for
(;
np
!=
0
;
np
=
np
->
allnext
)
{
if
(
np
->
full_name
!=
0
&&
strcasecmp
(
np
->
full_name
,
path
)
==
0
&&
of_node_get
(
np
))
break
;
}
read_unlock
(
&
devtree_lock
);
return
np
;
}
EXPORT_SYMBOL
(
of_find_node_by_path
);
/**
* of_find_node_by_phandle - Find a node given a phandle
* @handle: phandle of the node to find
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct
device_node
*
of_find_node_by_phandle
(
phandle
handle
)
{
struct
device_node
*
np
;
read_lock
(
&
devtree_lock
);
for
(
np
=
allnodes
;
np
!=
0
;
np
=
np
->
allnext
)
if
(
np
->
linux_phandle
==
handle
)
break
;
if
(
np
)
of_node_get
(
np
);
read_unlock
(
&
devtree_lock
);
return
np
;
}
EXPORT_SYMBOL
(
of_find_node_by_phandle
);
/**
* of_find_all_nodes - Get next node in global list
* @prev: Previous node or NULL to start iteration
* of_node_put() will be called on it
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct
device_node
*
of_find_all_nodes
(
struct
device_node
*
prev
)
{
struct
device_node
*
np
;
read_lock
(
&
devtree_lock
);
np
=
prev
?
prev
->
allnext
:
allnodes
;
for
(;
np
!=
0
;
np
=
np
->
allnext
)
if
(
of_node_get
(
np
))
break
;
if
(
prev
)
of_node_put
(
prev
);
read_unlock
(
&
devtree_lock
);
return
np
;
}
EXPORT_SYMBOL
(
of_find_all_nodes
);
/**
* of_get_parent - Get a node's parent if any
* @node: Node to get parent
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct
device_node
*
of_get_parent
(
const
struct
device_node
*
node
)
{
struct
device_node
*
np
;
if
(
!
node
)
return
NULL
;
read_lock
(
&
devtree_lock
);
np
=
of_node_get
(
node
->
parent
);
read_unlock
(
&
devtree_lock
);
return
np
;
}
EXPORT_SYMBOL
(
of_get_parent
);
/**
* of_get_next_child - Iterate a node childs
* @node: parent node
* @prev: previous child of the parent node, or NULL to get first
*
* Returns a node pointer with refcount incremented, use
* of_node_put() on it when done.
*/
struct
device_node
*
of_get_next_child
(
const
struct
device_node
*
node
,
struct
device_node
*
prev
)
{
struct
device_node
*
next
;
read_lock
(
&
devtree_lock
);
next
=
prev
?
prev
->
sibling
:
node
->
child
;
for
(;
next
!=
0
;
next
=
next
->
sibling
)
if
(
of_node_get
(
next
))
break
;
if
(
prev
)
of_node_put
(
prev
);
read_unlock
(
&
devtree_lock
);
return
next
;
}
EXPORT_SYMBOL
(
of_get_next_child
);
/**
 *	of_node_get - Increment refcount of a node
 *	@node:	Node to inc refcount, NULL is supported to
 *		simplify writing of callers
 *
 *	Returns node.
 */
struct device_node *of_node_get(struct device_node *node)
{
	if (node)
		kref_get(&node->kref);
	return node;
}
EXPORT_SYMBOL(of_node_get);

static inline struct device_node *kref_to_device_node(struct kref *kref)
{
	return container_of(kref, struct device_node, kref);
}

/**
 *	of_node_release - release a dynamically allocated node
 *	@kref:	kref element of the node to be released
 *
 *	In of_node_put() this function is passed to kref_put()
 *	as the destructor.
 */
static void of_node_release(struct kref *kref)
{
	struct device_node *node = kref_to_device_node(kref);
	struct property *prop = node->properties;

	if (!OF_IS_DYNAMIC(node))
		return;
	while (prop) {
		struct property *next = prop->next;
		kfree(prop->name);
		kfree(prop->value);
		kfree(prop);
		prop = next;
	}
	kfree(node->intrs);
	kfree(node->addrs);
	kfree(node->full_name);
	kfree(node->data);
	kfree(node);
}

/**
 *	of_node_put - Decrement refcount of a node
 *	@node:	Node to dec refcount, NULL is supported to
 *		simplify writing of callers
 *
 */
void of_node_put(struct device_node *node)
{
	if (node)
		kref_put(&node->kref, of_node_release);
}
EXPORT_SYMBOL(of_node_put);
/*
* Fix up the uninitialized fields in a new device node:
* name, type, n_addrs, addrs, n_intrs, intrs, and pci-specific fields
*
* A lot of boot-time code is duplicated here, because functions such
* as finish_node_interrupts, interpret_pci_props, etc. cannot use the
* slab allocator.
*
* This should probably be split up into smaller chunks.
*/
static
int
of_finish_dynamic_node
(
struct
device_node
*
node
,
unsigned
long
*
unused1
,
int
unused2
,
int
unused3
,
int
unused4
)
{
struct
device_node
*
parent
=
of_get_parent
(
node
);
int
err
=
0
;
phandle
*
ibm_phandle
;
node
->
name
=
get_property
(
node
,
"name"
,
NULL
);
node
->
type
=
get_property
(
node
,
"device_type"
,
NULL
);
if
(
!
parent
)
{
err
=
-
ENODEV
;
goto
out
;
}
/* We don't support that function on PowerMac, at least
* not yet
*/
if
(
_machine
==
PLATFORM_POWERMAC
)
return
-
ENODEV
;
/* fix up new node's linux_phandle field */
if
((
ibm_phandle
=
(
unsigned
int
*
)
get_property
(
node
,
"ibm,phandle"
,
NULL
)))
node
->
linux_phandle
=
*
ibm_phandle
;
out:
of_node_put
(
parent
);
return
err
;
}
/*
* Plug a device node into the tree and global list.
*/
void
of_attach_node
(
struct
device_node
*
np
)
{
write_lock
(
&
devtree_lock
);
np
->
sibling
=
np
->
parent
->
child
;
np
->
allnext
=
allnodes
;
np
->
parent
->
child
=
np
;
allnodes
=
np
;
write_unlock
(
&
devtree_lock
);
}
/*
* "Unplug" a node from the device tree. The caller must hold
* a reference to the node. The memory associated with the node
* is not freed until its refcount goes to zero.
*/
void
of_detach_node
(
const
struct
device_node
*
np
)
{
struct
device_node
*
parent
;
write_lock
(
&
devtree_lock
);
parent
=
np
->
parent
;
if
(
allnodes
==
np
)
allnodes
=
np
->
allnext
;
else
{
struct
device_node
*
prev
;
for
(
prev
=
allnodes
;
prev
->
allnext
!=
np
;
prev
=
prev
->
allnext
)
;
prev
->
allnext
=
np
->
allnext
;
}
if
(
parent
->
child
==
np
)
parent
->
child
=
np
->
sibling
;
else
{
struct
device_node
*
prevsib
;
for
(
prevsib
=
np
->
parent
->
child
;
prevsib
->
sibling
!=
np
;
prevsib
=
prevsib
->
sibling
)
;
prevsib
->
sibling
=
np
->
sibling
;
}
write_unlock
(
&
devtree_lock
);
}
static
int
prom_reconfig_notifier
(
struct
notifier_block
*
nb
,
unsigned
long
action
,
void
*
node
)
{
int
err
;
switch
(
action
)
{
case
PSERIES_RECONFIG_ADD
:
err
=
finish_node
(
node
,
NULL
,
of_finish_dynamic_node
,
0
,
0
,
0
);
if
(
err
<
0
)
{
printk
(
KERN_ERR
"finish_node returned %d
\n
"
,
err
);
err
=
NOTIFY_BAD
;
}
break
;
default:
err
=
NOTIFY_DONE
;
break
;
}
return
err
;
}
static
struct
notifier_block
prom_reconfig_nb
=
{
.
notifier_call
=
prom_reconfig_notifier
,
.
priority
=
10
,
/* This one needs to run first */
};
static
int
__init
prom_reconfig_setup
(
void
)
{
return
pSeries_reconfig_notifier_register
(
&
prom_reconfig_nb
);
}
__initcall
(
prom_reconfig_setup
);
/*
 * Find a property with a given name for a given node
 * and return the value.
 */
unsigned char *get_property(struct device_node *np, const char *name,
			    int *lenp)
{
	struct property *pp;

	for (pp = np->properties; pp != 0; pp = pp->next)
		if (strcmp(pp->name, name) == 0) {
			if (lenp != 0)
				*lenp = pp->length;
			return pp->value;
		}
	return NULL;
}
EXPORT_SYMBOL(get_property);
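get_property() returns a pointer straight into the node's property list, so callers cast it to the expected type and check the returned length before using it. A short example; the property name is the standard OF "clock-frequency", but the helper itself is hypothetical.

/* Hypothetical caller: read a single-cell "clock-frequency" property. */
static unsigned int read_clock_frequency(struct device_node *np)
{
	int len;
	unsigned int *freq = (unsigned int *)get_property(np,
					"clock-frequency", &len);

	if (freq == NULL || len < sizeof(*freq))
		return 0;	/* property missing or too short */
	return *freq;
}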
/*
 * Add a property to a node.
 */
int prom_add_property(struct device_node *np, struct property *prop)
{
	struct property **next;

	prop->next = NULL;
	write_lock(&devtree_lock);
	next = &np->properties;
	while (*next) {
		if (strcmp(prop->name, (*next)->name) == 0) {
			/* duplicate ! don't insert it */
			write_unlock(&devtree_lock);
			return -1;
		}
		next = &(*next)->next;
	}
	*next = prop;
	write_unlock(&devtree_lock);

	/* try to add to proc as well if it was initialized */
	if (np->pde)
		proc_device_tree_add_prop(np->pde, prop);

	return 0;
}
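The caller hands prom_add_property() a fully initialized struct property whose storage outlives the node. A hedged sketch follows, assuming only the name/value/length/next fields that get_property() and of_node_release() use above; the property name is hypothetical. Note that of_node_release() kfree()s name and value on dynamic nodes, so for such nodes those would need to be kmalloc'd rather than static as shown here.

/* Hypothetical: attach a one-cell marker property to a node. */
static int add_marker_property(struct device_node *np)
{
	static char name[] = "linux,example-marker";	/* hypothetical name */
	static u32 one = 1;
	struct property *prop = kmalloc(sizeof(*prop), GFP_KERNEL);

	if (!prop)
		return -ENOMEM;
	prop->name = name;
	prop->value = (void *)&one;
	prop->length = sizeof(one);
	if (prom_add_property(np, prop) != 0) {
		kfree(prop);	/* a property with this name already exists */
		return -1;
	}
	return 0;
}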
#if 0
void
print_properties(struct device_node *np)
{
struct property *pp;
char *cp;
int i, n;
for (pp = np->properties; pp != 0; pp = pp->next) {
printk(KERN_INFO "%s", pp->name);
for (i = strlen(pp->name); i < 16; ++i)
printk(" ");
cp = (char *) pp->value;
for (i = pp->length; i > 0; --i, ++cp)
if ((i > 1 && (*cp < 0x20 || *cp > 0x7e))
|| (i == 1 && *cp != 0))
break;
if (i == 0 && pp->length > 1) {
/* looks like a string */
printk(" %s\n", (char *) pp->value);
} else {
/* dump it in hex */
n = pp->length;
if (n > 64)
n = 64;
if (pp->length % 4 == 0) {
unsigned int *p = (unsigned int *) pp->value;
n /= 4;
for (i = 0; i < n; ++i) {
if (i != 0 && (i % 4) == 0)
printk("\n ");
printk(" %08x", *p++);
}
} else {
unsigned char *bp = pp->value;
for (i = 0; i < n; ++i) {
if (i != 0 && (i % 16) == 0)
printk("\n ");
printk(" %02x", *bp++);
}
}
printk("\n");
if (pp->length > 64)
printk(" ... (length = %d)\n",
pp->length);
}
}
}
#endif
arch/ppc64/kernel/prom_init.c deleted 100644 → 0
/*
*
*
* Procedures for interfacing to Open Firmware.
*
* Paul Mackerras August 1996.
* Copyright (C) 1996 Paul Mackerras.
*
* Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
* {engebret|bergner}@us.ibm.com
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#undef DEBUG_PROM
#include <stdarg.h>
#include <linux/config.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/threads.h>
#include <linux/spinlock.h>
#include <linux/types.h>
#include <linux/pci.h>
#include <linux/proc_fs.h>
#include <linux/stringify.h>
#include <linux/delay.h>
#include <linux/initrd.h>
#include <linux/bitops.h>
#include <asm/prom.h>
#include <asm/rtas.h>
#include <asm/abs_addr.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/system.h>
#include <asm/mmu.h>
#include <asm/pgtable.h>
#include <asm/pci.h>
#include <asm/iommu.h>
#include <asm/btext.h>
#include <asm/sections.h>
#include <asm/machdep.h>
#ifdef CONFIG_LOGO_LINUX_CLUT224
#include <linux/linux_logo.h>
extern
const
struct
linux_logo
logo_linux_clut224
;
#endif
/*
* Properties whose value is longer than this get excluded from our
* copy of the device tree. This value does need to be big enough to
* ensure that we don't lose things like the interrupt-map property
* on a PCI-PCI bridge.
*/
#define MAX_PROPERTY_LENGTH (1UL * 1024 * 1024)
/*
* Eventually bump that one up
*/
#define DEVTREE_CHUNK_SIZE 0x100000
/*
* This is the size of the local memory reserve map that gets copied
* into the boot params passed to the kernel. That size is totally
* flexible as the kernel just reads the list until it encounters an
* entry with size 0, so it can be changed without breaking binary
* compatibility
*/
#define MEM_RESERVE_MAP_SIZE 8
/*
* prom_init() is called very early on, before the kernel text
* and data have been mapped to KERNELBASE. At this point the code
* is running at whatever address it has been loaded at, so
* references to extern and static variables must be relocated
* explicitly. The procedure reloc_offset() returns the address
* we're currently running at minus the address we were linked at.
* (Note that strings count as static variables.)
*
* Because OF may have mapped I/O devices into the area starting at
* KERNELBASE, particularly on CHRP machines, we can't safely call
* OF once the kernel has been mapped to KERNELBASE. Therefore all
* OF calls should be done within prom_init(), and prom_init()
* and all routines called within it must be careful to relocate
* references as necessary.
*
* Note that the bss is cleared *after* prom_init runs, so we have
* to make sure that any static or extern variables it accesses
* are put in the data segment.
*/
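The RELOC()/PTRRELOC() helpers referred to above are defined in the ppc64 headers rather than in this file. The block below is only an approximate sketch to make the comment concrete; the macro bodies shown are an assumption, not the kernel's exact definitions.

/* Approximate sketch only (assumed, not the real header definitions):
 * "offset" is the value returned by reloc_offset(), i.e. the address we
 * are running at minus the address we were linked at. */
#define PTRRELOC(x)	((typeof(x))((unsigned long)(x) - offset))
#define RELOC(x)	(*PTRRELOC(&(x)))

static unsigned long example_var;		/* hypothetical static variable */

static void example_use(void)
{
	unsigned long offset = reloc_offset();	/* needed by the macros above */

	RELOC(example_var) = 1;	/* safe before the kernel is mapped at KERNELBASE */
	/* example_var = 1;	   would touch the unrelocated link-time address */
}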
#define PROM_BUG() do { \
prom_printf("kernel BUG at %s line 0x%x!\n", \
RELOC(__FILE__), __LINE__); \
__asm__ __volatile__(".long " BUG_ILLEGAL_INSTR); \
} while (0)
#ifdef DEBUG_PROM
#define prom_debug(x...) prom_printf(x)
#else
#define prom_debug(x...)
#endif
typedef
u32
prom_arg_t
;
struct
prom_args
{
u32
service
;
u32
nargs
;
u32
nret
;
prom_arg_t
args
[
10
];
prom_arg_t
*
rets
;
/* Pointer to return values in args[16]. */
};
struct
prom_t
{
unsigned
long
entry
;
ihandle
root
;
ihandle
chosen
;
int
cpu
;
ihandle
stdout
;
ihandle
disp_node
;
struct
prom_args
args
;
unsigned
long
version
;
unsigned
long
root_size_cells
;
unsigned
long
root_addr_cells
;
};
struct
pci_reg_property
{
struct
pci_address
addr
;
u32
size_hi
;
u32
size_lo
;
};
struct
mem_map_entry
{
u64
base
;
u64
size
;
};
typedef
u32
cell_t
;
extern
void
__start
(
unsigned
long
r3
,
unsigned
long
r4
,
unsigned
long
r5
);
extern
void
enter_prom
(
struct
prom_args
*
args
,
unsigned
long
entry
);
extern
void
copy_and_flush
(
unsigned
long
dest
,
unsigned
long
src
,
unsigned
long
size
,
unsigned
long
offset
);
extern
unsigned
long
klimit
;
/* prom structure */
static
struct
prom_t
__initdata
prom
;
#define PROM_SCRATCH_SIZE 256
static
char
__initdata
of_stdout_device
[
256
];
static
char
__initdata
prom_scratch
[
PROM_SCRATCH_SIZE
];
static
unsigned
long
__initdata
dt_header_start
;
static
unsigned
long
__initdata
dt_struct_start
,
dt_struct_end
;
static
unsigned
long
__initdata
dt_string_start
,
dt_string_end
;
static
unsigned
long
__initdata
prom_initrd_start
,
prom_initrd_end
;
static
int
__initdata
iommu_force_on
;
static
int
__initdata
ppc64_iommu_off
;
static
int
__initdata
of_platform
;
static
char
__initdata
prom_cmd_line
[
COMMAND_LINE_SIZE
];
static
unsigned
long
__initdata
prom_memory_limit
;
static
unsigned
long
__initdata
prom_tce_alloc_start
;
static
unsigned
long
__initdata
prom_tce_alloc_end
;
static
unsigned
long
__initdata
alloc_top
;
static
unsigned
long
__initdata
alloc_top_high
;
static
unsigned
long
__initdata
alloc_bottom
;
static
unsigned
long
__initdata
rmo_top
;
static
unsigned
long
__initdata
ram_top
;
static
struct
mem_map_entry
__initdata
mem_reserve_map
[
MEM_RESERVE_MAP_SIZE
];
static
int
__initdata
mem_reserve_cnt
;
static
cell_t
__initdata
regbuf
[
1024
];
#define MAX_CPU_THREADS 2
/* TO GO */
#ifdef CONFIG_HMT
struct
{
unsigned
int
pir
;
unsigned
int
threadid
;
}
hmt_thread_data
[
NR_CPUS
];
#endif
/* CONFIG_HMT */
/*
 * These are used in calls to call_prom. The 4th and following
* arguments to call_prom should be 32-bit values. 64 bit values
* are truncated to 32 bits (and fortunately don't get interpreted
* as two arguments).
*/
#define ADDR(x) (u32) ((unsigned long)(x) - offset)
/*
* Error results ... some OF calls will return "-1" on error, some
* will return 0, some will return either. To simplify, here are
* macros to use with any ihandle or phandle return value to check if
* it is valid
*/
#define PROM_ERROR (-1u)
#define PHANDLE_VALID(p) ((p) != 0 && (p) != PROM_ERROR)
#define IHANDLE_VALID(i) ((i) != 0 && (i) != PROM_ERROR)
/* This is the one and *ONLY* place where we actually call open
* firmware from, since we need to make sure we're running in 32b
* mode when we do. We switch back to 64b mode upon return.
*/
static int __init call_prom(const char *service, int nargs, int nret, ...)
{
	int i;
	unsigned long offset = reloc_offset();
	struct prom_t *_prom = PTRRELOC(&prom);
	va_list list;

	_prom->args.service = ADDR(service);
	_prom->args.nargs = nargs;
	_prom->args.nret = nret;
	_prom->args.rets = (prom_arg_t *)&(_prom->args.args[nargs]);

	va_start(list, nret);
	for (i = 0; i < nargs; i++)
		_prom->args.args[i] = va_arg(list, prom_arg_t);
	va_end(list);

	for (i = 0; i < nret; i++)
		_prom->args.rets[i] = 0;

	enter_prom(&_prom->args, _prom->entry);

	return (nret > 0) ? _prom->args.rets[0] : 0;
}
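call_prom() is the single funnel for every client-interface service: the service name, the number of 32-bit input arguments and the number of expected return values come first, followed by the arguments themselves. The sketch below shows the calling convention; it mirrors the real "finddevice" call made later in prom_init_client_services() and is not new functionality.

static void __init example_find_chosen(void)
{
	unsigned long offset = reloc_offset();	/* needed by ADDR() */
	phandle chosen;

	/* one 32-bit argument (the path), one return value (the phandle) */
	chosen = call_prom("finddevice", 1, 1, ADDR("/chosen"));
	if (!PHANDLE_VALID(chosen))
		return;		/* not found, or OF returned an error */
}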
static
unsigned
int
__init
prom_claim
(
unsigned
long
virt
,
unsigned
long
size
,
unsigned
long
align
)
{
return
(
unsigned
int
)
call_prom
(
"claim"
,
3
,
1
,
(
prom_arg_t
)
virt
,
(
prom_arg_t
)
size
,
(
prom_arg_t
)
align
);
}
static
void
__init
prom_print
(
const
char
*
msg
)
{
const
char
*
p
,
*
q
;
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
if
(
_prom
->
stdout
==
0
)
return
;
for
(
p
=
msg
;
*
p
!=
0
;
p
=
q
)
{
for
(
q
=
p
;
*
q
!=
0
&&
*
q
!=
'\n'
;
++
q
)
;
if
(
q
>
p
)
call_prom
(
"write"
,
3
,
1
,
_prom
->
stdout
,
p
,
q
-
p
);
if
(
*
q
==
0
)
break
;
++
q
;
call_prom
(
"write"
,
3
,
1
,
_prom
->
stdout
,
ADDR
(
"
\r\n
"
),
2
);
}
}
static
void
__init
prom_print_hex
(
unsigned
long
val
)
{
unsigned
long
offset
=
reloc_offset
();
int
i
,
nibbles
=
sizeof
(
val
)
*
2
;
char
buf
[
sizeof
(
val
)
*
2
+
1
];
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
for
(
i
=
nibbles
-
1
;
i
>=
0
;
i
--
)
{
buf
[
i
]
=
(
val
&
0xf
)
+
'0'
;
if
(
buf
[
i
]
>
'9'
)
buf
[
i
]
+=
(
'a'
-
'0'
-
10
);
val
>>=
4
;
}
buf
[
nibbles
]
=
'\0'
;
call_prom
(
"write"
,
3
,
1
,
_prom
->
stdout
,
buf
,
nibbles
);
}
static
void
__init
prom_printf
(
const
char
*
format
,
...)
{
unsigned
long
offset
=
reloc_offset
();
const
char
*
p
,
*
q
,
*
s
;
va_list
args
;
unsigned
long
v
;
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
va_start
(
args
,
format
);
for
(
p
=
PTRRELOC
(
format
);
*
p
!=
0
;
p
=
q
)
{
for
(
q
=
p
;
*
q
!=
0
&&
*
q
!=
'\n'
&&
*
q
!=
'%'
;
++
q
)
;
if
(
q
>
p
)
call_prom
(
"write"
,
3
,
1
,
_prom
->
stdout
,
p
,
q
-
p
);
if
(
*
q
==
0
)
break
;
if
(
*
q
==
'\n'
)
{
++
q
;
call_prom
(
"write"
,
3
,
1
,
_prom
->
stdout
,
ADDR
(
"
\r\n
"
),
2
);
continue
;
}
++
q
;
if
(
*
q
==
0
)
break
;
switch
(
*
q
)
{
case
's'
:
++
q
;
s
=
va_arg
(
args
,
const
char
*
);
prom_print
(
s
);
break
;
case
'x'
:
++
q
;
v
=
va_arg
(
args
,
unsigned
long
);
prom_print_hex
(
v
);
break
;
}
}
}
static
void
__init
__attribute__
((
noreturn
))
prom_panic
(
const
char
*
reason
)
{
unsigned
long
offset
=
reloc_offset
();
prom_print
(
PTRRELOC
(
reason
));
/* ToDo: should put up an SRC here */
call_prom
(
"exit"
,
0
,
0
);
for
(;;)
/* should never get here */
;
}
static int __init prom_next_node(phandle *nodep)
{
	phandle node;

	if ((node = *nodep) != 0
	    && (*nodep = call_prom("child", 1, 1, node)) != 0)
		return 1;
	if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
		return 1;
	for (;;) {
		if ((node = call_prom("parent", 1, 1, node)) == 0)
			return 0;
		if ((*nodep = call_prom("peer", 1, 1, node)) != 0)
			return 1;
	}
}
static int __init prom_getprop(phandle node, const char *pname,
			       void *value, size_t valuelen)
{
	unsigned long offset = reloc_offset();

	return call_prom("getprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}

static int __init prom_getproplen(phandle node, const char *pname)
{
	unsigned long offset = reloc_offset();

	return call_prom("getproplen", 2, 1, node, ADDR(pname));
}

static int __init prom_setprop(phandle node, const char *pname,
			       void *value, size_t valuelen)
{
	unsigned long offset = reloc_offset();

	return call_prom("setprop", 4, 1, node, ADDR(pname),
			 (u32)(unsigned long) value, (u32) valuelen);
}
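prom_next_node() above advances a depth-first walk over the whole OF tree: first the node's child, then its peer, then climb parents until a peer exists. Combined with prom_getprop(), that gives the scan loop used later in this file by prom_init_mem(), prom_hold_cpus() and prom_check_displays(); the sketch below only shows that idiom in isolation, with a hypothetical loop body.

/* Walk every node and read its "device_type"; starting from node == 0
 * makes the first prom_next_node() call return the root's first child. */
static void __init example_walk_tree(void)
{
	phandle node;
	char type[64];

	for (node = 0; prom_next_node(&node); ) {
		type[0] = 0;
		prom_getprop(node, "device_type", type, sizeof(type));
		/* ... act on "type" here ... */
	}
}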
/* We can't use the standard versions because of RELOC headaches. */
#define isxdigit(c) (('0' <= (c) && (c) <= '9') \
|| ('a' <= (c) && (c) <= 'f') \
|| ('A' <= (c) && (c) <= 'F'))
#define isdigit(c) ('0' <= (c) && (c) <= '9')
#define islower(c) ('a' <= (c) && (c) <= 'z')
#define toupper(c) (islower(c) ? ((c) - 'a' + 'A') : (c))
unsigned
long
prom_strtoul
(
const
char
*
cp
,
const
char
**
endp
)
{
unsigned
long
result
=
0
,
base
=
10
,
value
;
if
(
*
cp
==
'0'
)
{
base
=
8
;
cp
++
;
if
(
toupper
(
*
cp
)
==
'X'
)
{
cp
++
;
base
=
16
;
}
}
while
(
isxdigit
(
*
cp
)
&&
(
value
=
isdigit
(
*
cp
)
?
*
cp
-
'0'
:
toupper
(
*
cp
)
-
'A'
+
10
)
<
base
)
{
result
=
result
*
base
+
value
;
cp
++
;
}
if
(
endp
)
*
endp
=
cp
;
return
result
;
}
unsigned
long
prom_memparse
(
const
char
*
ptr
,
const
char
**
retptr
)
{
unsigned
long
ret
=
prom_strtoul
(
ptr
,
retptr
);
int
shift
=
0
;
/*
* We can't use a switch here because GCC *may* generate a
* jump table which won't work, because we're not running at
* the address we're linked at.
*/
if
(
'G'
==
**
retptr
||
'g'
==
**
retptr
)
shift
=
30
;
if
(
'M'
==
**
retptr
||
'm'
==
**
retptr
)
shift
=
20
;
if
(
'K'
==
**
retptr
||
'k'
==
**
retptr
)
shift
=
10
;
if
(
shift
)
{
ret
<<=
shift
;
(
*
retptr
)
++
;
}
return
ret
;
}
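early_cmdline_parse() below feeds the text after "mem=" to prom_memparse(): prom_strtoul() reads the number, and an optional K/M/G suffix selects a shift of 10, 20 or 30 bits. A tiny worked example; the command-line fragment is made up.

static unsigned long __init example_parse_mem(void)
{
	/* hypothetical command-line fragment found after "mem=" */
	const char *p = "512M";
	unsigned long limit = prom_memparse(p, &p);

	/* "512" comes from prom_strtoul(), the 'M' suffix shifts by 20,
	 * so limit == 512 << 20 == 0x20000000 bytes; *p now points past
	 * the consumed suffix. */
	return limit;
}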
/*
* Early parsing of the command line passed to the kernel, used for
* "mem=x" and the options that affect the iommu
*/
static
void
__init
early_cmdline_parse
(
void
)
{
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
char
*
opt
,
*
p
;
int
l
=
0
;
RELOC
(
prom_cmd_line
[
0
])
=
0
;
p
=
RELOC
(
prom_cmd_line
);
if
((
long
)
_prom
->
chosen
>
0
)
l
=
prom_getprop
(
_prom
->
chosen
,
"bootargs"
,
p
,
COMMAND_LINE_SIZE
-
1
);
#ifdef CONFIG_CMDLINE
if
(
l
==
0
)
/* dbl check */
strlcpy
(
RELOC
(
prom_cmd_line
),
RELOC
(
CONFIG_CMDLINE
),
sizeof
(
prom_cmd_line
));
#endif
/* CONFIG_CMDLINE */
prom_printf
(
"command line: %s
\n
"
,
RELOC
(
prom_cmd_line
));
opt
=
strstr
(
RELOC
(
prom_cmd_line
),
RELOC
(
"iommu="
));
if
(
opt
)
{
prom_printf
(
"iommu opt is: %s
\n
"
,
opt
);
opt
+=
6
;
while
(
*
opt
&&
*
opt
==
' '
)
opt
++
;
if
(
!
strncmp
(
opt
,
RELOC
(
"off"
),
3
))
RELOC
(
ppc64_iommu_off
)
=
1
;
else
if
(
!
strncmp
(
opt
,
RELOC
(
"force"
),
5
))
RELOC
(
iommu_force_on
)
=
1
;
}
opt
=
strstr
(
RELOC
(
prom_cmd_line
),
RELOC
(
"mem="
));
if
(
opt
)
{
opt
+=
4
;
RELOC
(
prom_memory_limit
)
=
prom_memparse
(
opt
,
(
const
char
**
)
&
opt
);
/* Align to 16 MB == size of large page */
RELOC
(
prom_memory_limit
)
=
ALIGN
(
RELOC
(
prom_memory_limit
),
0x1000000
);
}
}
/*
* To tell the firmware what our capabilities are, we have to pass
* it a fake 32-bit ELF header containing a couple of PT_NOTE sections
* that contain structures that contain the actual values.
*/
static
struct
fake_elf
{
Elf32_Ehdr
elfhdr
;
Elf32_Phdr
phdr
[
2
];
struct
chrpnote
{
u32
namesz
;
u32
descsz
;
u32
type
;
char
name
[
8
];
/* "PowerPC" */
struct
chrpdesc
{
u32
real_mode
;
u32
real_base
;
u32
real_size
;
u32
virt_base
;
u32
virt_size
;
u32
load_base
;
}
chrpdesc
;
}
chrpnote
;
struct
rpanote
{
u32
namesz
;
u32
descsz
;
u32
type
;
char
name
[
24
];
/* "IBM,RPA-Client-Config" */
struct
rpadesc
{
u32
lpar_affinity
;
u32
min_rmo_size
;
u32
min_rmo_percent
;
u32
max_pft_size
;
u32
splpar
;
u32
min_load
;
u32
new_mem_def
;
u32
ignore_me
;
}
rpadesc
;
}
rpanote
;
}
fake_elf
=
{
.
elfhdr
=
{
.
e_ident
=
{
0x7f
,
'E'
,
'L'
,
'F'
,
ELFCLASS32
,
ELFDATA2MSB
,
EV_CURRENT
},
.
e_type
=
ET_EXEC
,
/* yeah right */
.
e_machine
=
EM_PPC
,
.
e_version
=
EV_CURRENT
,
.
e_phoff
=
offsetof
(
struct
fake_elf
,
phdr
),
.
e_phentsize
=
sizeof
(
Elf32_Phdr
),
.
e_phnum
=
2
},
.
phdr
=
{
[
0
]
=
{
.
p_type
=
PT_NOTE
,
.
p_offset
=
offsetof
(
struct
fake_elf
,
chrpnote
),
.
p_filesz
=
sizeof
(
struct
chrpnote
)
},
[
1
]
=
{
.
p_type
=
PT_NOTE
,
.
p_offset
=
offsetof
(
struct
fake_elf
,
rpanote
),
.
p_filesz
=
sizeof
(
struct
rpanote
)
}
},
.
chrpnote
=
{
.
namesz
=
sizeof
(
"PowerPC"
),
.
descsz
=
sizeof
(
struct
chrpdesc
),
.
type
=
0x1275
,
.
name
=
"PowerPC"
,
.
chrpdesc
=
{
.
real_mode
=
~
0U
,
/* ~0 means "don't care" */
.
real_base
=
~
0U
,
.
real_size
=
~
0U
,
.
virt_base
=
~
0U
,
.
virt_size
=
~
0U
,
.
load_base
=
~
0U
},
},
.
rpanote
=
{
.
namesz
=
sizeof
(
"IBM,RPA-Client-Config"
),
.
descsz
=
sizeof
(
struct
rpadesc
),
.
type
=
0x12759999
,
.
name
=
"IBM,RPA-Client-Config"
,
.
rpadesc
=
{
.
lpar_affinity
=
0
,
.
min_rmo_size
=
64
,
/* in megabytes */
.
min_rmo_percent
=
0
,
.
max_pft_size
=
48
,
/* 2^48 bytes max PFT size */
.
splpar
=
1
,
.
min_load
=
~
0U
,
.
new_mem_def
=
0
}
}
};
static
void
__init
prom_send_capabilities
(
void
)
{
unsigned
long
offset
=
reloc_offset
();
ihandle
elfloader
;
elfloader
=
call_prom
(
"open"
,
1
,
1
,
ADDR
(
"/packages/elf-loader"
));
if
(
elfloader
==
0
)
{
prom_printf
(
"couldn't open /packages/elf-loader
\n
"
);
return
;
}
call_prom
(
"call-method"
,
3
,
1
,
ADDR
(
"process-elf-header"
),
elfloader
,
ADDR
(
&
fake_elf
));
call_prom
(
"close"
,
1
,
0
,
elfloader
);
}
/*
* Memory allocation strategy... our layout is normally:
*
 * at 14MB or more we have vmlinux, then a gap and the initrd. In some rare
 * cases the initrd might end up being before the kernel though. We assume
 * this won't overwrite the final kernel at 0; we have no provision to handle
 * that in this version, but it should hopefully never happen.
 *
 * alloc_top is set to the top of RMO, eventually shrunk if the TCEs overlap.
 * alloc_bottom is set to the top of the kernel/initrd.
 *
 * From there, allocations are done this way: RTAS is allocated topmost, and
 * the device-tree is allocated from the bottom. We try to grow the
 * device-tree allocation as we progress. If we can't, then we fail; we don't
 * currently have a facility to restart elsewhere, but that shouldn't be
 * necessary either.
 *
 * Note that calls to reserve_mem have to be done explicitly; memory allocated
* with either alloc_up or alloc_down isn't automatically reserved.
*/
/*
* Allocates memory in the RMO upward from the kernel/initrd
*
* When align is 0, this is a special case, it means to allocate in place
* at the current location of alloc_bottom or fail (that is basically
* extending the previous allocation). Used for the device-tree flattening
*/
static
unsigned
long
__init
alloc_up
(
unsigned
long
size
,
unsigned
long
align
)
{
unsigned
long
offset
=
reloc_offset
();
unsigned
long
base
=
_ALIGN_UP
(
RELOC
(
alloc_bottom
),
align
);
unsigned
long
addr
=
0
;
prom_debug
(
"alloc_up(%x, %x)
\n
"
,
size
,
align
);
if
(
RELOC
(
ram_top
)
==
0
)
prom_panic
(
"alloc_up() called with mem not initialized
\n
"
);
if
(
align
)
base
=
_ALIGN_UP
(
RELOC
(
alloc_bottom
),
align
);
else
base
=
RELOC
(
alloc_bottom
);
for
(;
(
base
+
size
)
<=
RELOC
(
alloc_top
);
base
=
_ALIGN_UP
(
base
+
0x100000
,
align
))
{
prom_debug
(
" trying: 0x%x
\n\r
"
,
base
);
addr
=
(
unsigned
long
)
prom_claim
(
base
,
size
,
0
);
if
(
addr
!=
PROM_ERROR
)
break
;
addr
=
0
;
if
(
align
==
0
)
break
;
}
if
(
addr
==
0
)
return
0
;
RELOC
(
alloc_bottom
)
=
addr
;
prom_debug
(
" -> %x
\n
"
,
addr
);
prom_debug
(
" alloc_bottom : %x
\n
"
,
RELOC
(
alloc_bottom
));
prom_debug
(
" alloc_top : %x
\n
"
,
RELOC
(
alloc_top
));
prom_debug
(
" alloc_top_hi : %x
\n
"
,
RELOC
(
alloc_top_high
));
prom_debug
(
" rmo_top : %x
\n
"
,
RELOC
(
rmo_top
));
prom_debug
(
" ram_top : %x
\n
"
,
RELOC
(
ram_top
));
return
addr
;
}
/*
 * Allocates memory downward, either from the top of RMO, or if highmem
 * is set, from the top of RAM. Note that this one doesn't handle
 * failures. It does claim memory if highmem is not set.
*/
static
unsigned
long
__init
alloc_down
(
unsigned
long
size
,
unsigned
long
align
,
int
highmem
)
{
unsigned
long
offset
=
reloc_offset
();
unsigned
long
base
,
addr
=
0
;
prom_debug
(
"alloc_down(%x, %x, %s)
\n
"
,
size
,
align
,
highmem
?
RELOC
(
"(high)"
)
:
RELOC
(
"(low)"
));
if
(
RELOC
(
ram_top
)
==
0
)
prom_panic
(
"alloc_down() called with mem not initialized
\n
"
);
if
(
highmem
)
{
/* Carve out storage for the TCE table. */
addr
=
_ALIGN_DOWN
(
RELOC
(
alloc_top_high
)
-
size
,
align
);
if
(
addr
<=
RELOC
(
alloc_bottom
))
return
0
;
else
{
/* Will we bump into the RMO ? If yes, check out that we
* didn't overlap existing allocations there, if we did,
* we are dead, we must be the first in town !
*/
if
(
addr
<
RELOC
(
rmo_top
))
{
/* Good, we are first */
if
(
RELOC
(
alloc_top
)
==
RELOC
(
rmo_top
))
RELOC
(
alloc_top
)
=
RELOC
(
rmo_top
)
=
addr
;
else
return
0
;
}
RELOC
(
alloc_top_high
)
=
addr
;
}
goto
bail
;
}
base
=
_ALIGN_DOWN
(
RELOC
(
alloc_top
)
-
size
,
align
);
for
(;
base
>
RELOC
(
alloc_bottom
);
base
=
_ALIGN_DOWN
(
base
-
0x100000
,
align
))
{
prom_debug
(
" trying: 0x%x
\n\r
"
,
base
);
addr
=
(
unsigned
long
)
prom_claim
(
base
,
size
,
0
);
if
(
addr
!=
PROM_ERROR
)
break
;
addr
=
0
;
}
if
(
addr
==
0
)
return
0
;
RELOC
(
alloc_top
)
=
addr
;
bail:
prom_debug
(
" -> %x
\n
"
,
addr
);
prom_debug
(
" alloc_bottom : %x
\n
"
,
RELOC
(
alloc_bottom
));
prom_debug
(
" alloc_top : %x
\n
"
,
RELOC
(
alloc_top
));
prom_debug
(
" alloc_top_hi : %x
\n
"
,
RELOC
(
alloc_top_high
));
prom_debug
(
" rmo_top : %x
\n
"
,
RELOC
(
rmo_top
));
prom_debug
(
" ram_top : %x
\n
"
,
RELOC
(
ram_top
));
return
addr
;
}
/*
* Parse a "reg" cell
*/
static
unsigned
long
__init
prom_next_cell
(
int
s
,
cell_t
**
cellp
)
{
cell_t
*
p
=
*
cellp
;
unsigned
long
r
=
0
;
/* Ignore more than 2 cells */
while
(
s
>
2
)
{
p
++
;
s
--
;
}
while
(
s
)
{
r
<<=
32
;
r
|=
*
(
p
++
);
s
--
;
}
*
cellp
=
p
;
return
r
;
}
/*
* Very dumb function for adding to the memory reserve list, but
* we don't need anything smarter at this point
*
* XXX Eventually check for collisions. They should NEVER happen
* if problems seem to show up, it would be a good start to track
* them down.
*/
static
void
reserve_mem
(
unsigned
long
base
,
unsigned
long
size
)
{
unsigned
long
offset
=
reloc_offset
();
unsigned
long
top
=
base
+
size
;
unsigned
long
cnt
=
RELOC
(
mem_reserve_cnt
);
if
(
size
==
0
)
return
;
/* We need to always keep one empty entry so that we
* have our terminator with "size" set to 0 since we are
* dumb and just copy this entire array to the boot params
*/
base
=
_ALIGN_DOWN
(
base
,
PAGE_SIZE
);
top
=
_ALIGN_UP
(
top
,
PAGE_SIZE
);
size
=
top
-
base
;
if
(
cnt
>=
(
MEM_RESERVE_MAP_SIZE
-
1
))
prom_panic
(
"Memory reserve map exhausted !
\n
"
);
RELOC
(
mem_reserve_map
)[
cnt
].
base
=
base
;
RELOC
(
mem_reserve_map
)[
cnt
].
size
=
size
;
RELOC
(
mem_reserve_cnt
)
=
cnt
+
1
;
}
/*
 * Initialize the memory allocation mechanism, parse "memory" nodes and
 * obtain that way the top of memory and of the RMO to set up our local allocator
*/
static
void
__init
prom_init_mem
(
void
)
{
phandle
node
;
char
*
path
,
type
[
64
];
unsigned
int
plen
;
cell_t
*
p
,
*
endp
;
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
/*
* We iterate the memory nodes to find
* 1) top of RMO (first node)
* 2) top of memory
*/
prom_debug
(
"root_addr_cells: %x
\n
"
,
(
long
)
_prom
->
root_addr_cells
);
prom_debug
(
"root_size_cells: %x
\n
"
,
(
long
)
_prom
->
root_size_cells
);
prom_debug
(
"scanning memory:
\n
"
);
path
=
RELOC
(
prom_scratch
);
for
(
node
=
0
;
prom_next_node
(
&
node
);
)
{
type
[
0
]
=
0
;
prom_getprop
(
node
,
"device_type"
,
type
,
sizeof
(
type
));
if
(
strcmp
(
type
,
RELOC
(
"memory"
)))
continue
;
plen
=
prom_getprop
(
node
,
"reg"
,
RELOC
(
regbuf
),
sizeof
(
regbuf
));
if
(
plen
>
sizeof
(
regbuf
))
{
prom_printf
(
"memory node too large for buffer !
\n
"
);
plen
=
sizeof
(
regbuf
);
}
p
=
RELOC
(
regbuf
);
endp
=
p
+
(
plen
/
sizeof
(
cell_t
));
#ifdef DEBUG_PROM
memset
(
path
,
0
,
PROM_SCRATCH_SIZE
);
call_prom
(
"package-to-path"
,
3
,
1
,
node
,
path
,
PROM_SCRATCH_SIZE
-
1
);
prom_debug
(
" node %s :
\n
"
,
path
);
#endif
/* DEBUG_PROM */
while
((
endp
-
p
)
>=
(
_prom
->
root_addr_cells
+
_prom
->
root_size_cells
))
{
unsigned
long
base
,
size
;
base
=
prom_next_cell
(
_prom
->
root_addr_cells
,
&
p
);
size
=
prom_next_cell
(
_prom
->
root_size_cells
,
&
p
);
if
(
size
==
0
)
continue
;
prom_debug
(
" %x %x
\n
"
,
base
,
size
);
if
(
base
==
0
)
RELOC
(
rmo_top
)
=
size
;
if
((
base
+
size
)
>
RELOC
(
ram_top
))
RELOC
(
ram_top
)
=
base
+
size
;
}
}
RELOC
(
alloc_bottom
)
=
PAGE_ALIGN
(
RELOC
(
klimit
)
-
offset
+
0x4000
);
/* Check if we have an initrd after the kernel, if we do move our bottom
* point to after it
*/
if
(
RELOC
(
prom_initrd_start
))
{
if
(
RELOC
(
prom_initrd_end
)
>
RELOC
(
alloc_bottom
))
RELOC
(
alloc_bottom
)
=
PAGE_ALIGN
(
RELOC
(
prom_initrd_end
));
}
/*
* If prom_memory_limit is set we reduce the upper limits *except* for
* alloc_top_high. This must be the real top of RAM so we can put
* TCE's up there.
*/
RELOC
(
alloc_top_high
)
=
RELOC
(
ram_top
);
if
(
RELOC
(
prom_memory_limit
))
{
if
(
RELOC
(
prom_memory_limit
)
<=
RELOC
(
alloc_bottom
))
{
prom_printf
(
"Ignoring mem=%x <= alloc_bottom.
\n
"
,
RELOC
(
prom_memory_limit
));
RELOC
(
prom_memory_limit
)
=
0
;
}
else
if
(
RELOC
(
prom_memory_limit
)
>=
RELOC
(
ram_top
))
{
prom_printf
(
"Ignoring mem=%x >= ram_top.
\n
"
,
RELOC
(
prom_memory_limit
));
RELOC
(
prom_memory_limit
)
=
0
;
}
else
{
RELOC
(
ram_top
)
=
RELOC
(
prom_memory_limit
);
RELOC
(
rmo_top
)
=
min
(
RELOC
(
rmo_top
),
RELOC
(
prom_memory_limit
));
}
}
/*
* Setup our top alloc point, that is top of RMO or top of
* segment 0 when running non-LPAR.
*/
if
(
RELOC
(
of_platform
)
==
PLATFORM_PSERIES_LPAR
)
RELOC
(
alloc_top
)
=
RELOC
(
rmo_top
);
else
/* Some RS64 machines have buggy firmware where claims up at 1GB
* fails. Cap at 768MB as a workaround. Still plenty of room.
*/
RELOC
(
alloc_top
)
=
RELOC
(
rmo_top
)
=
min
(
0x30000000ul
,
RELOC
(
ram_top
));
prom_printf
(
"memory layout at init:
\n
"
);
prom_printf
(
" memory_limit : %x (16 MB aligned)
\n
"
,
RELOC
(
prom_memory_limit
));
prom_printf
(
" alloc_bottom : %x
\n
"
,
RELOC
(
alloc_bottom
));
prom_printf
(
" alloc_top : %x
\n
"
,
RELOC
(
alloc_top
));
prom_printf
(
" alloc_top_hi : %x
\n
"
,
RELOC
(
alloc_top_high
));
prom_printf
(
" rmo_top : %x
\n
"
,
RELOC
(
rmo_top
));
prom_printf
(
" ram_top : %x
\n
"
,
RELOC
(
ram_top
));
}
/*
 * Allocate room for and instantiate RTAS
*/
static
void
__init
prom_instantiate_rtas
(
void
)
{
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
phandle
rtas_node
;
ihandle
rtas_inst
;
u32
base
,
entry
=
0
;
u32
size
=
0
;
prom_debug
(
"prom_instantiate_rtas: start...
\n
"
);
rtas_node
=
call_prom
(
"finddevice"
,
1
,
1
,
ADDR
(
"/rtas"
));
prom_debug
(
"rtas_node: %x
\n
"
,
rtas_node
);
if
(
!
PHANDLE_VALID
(
rtas_node
))
return
;
prom_getprop
(
rtas_node
,
"rtas-size"
,
&
size
,
sizeof
(
size
));
if
(
size
==
0
)
return
;
base
=
alloc_down
(
size
,
PAGE_SIZE
,
0
);
if
(
base
==
0
)
{
prom_printf
(
"RTAS allocation failed !
\n
"
);
return
;
}
rtas_inst
=
call_prom
(
"open"
,
1
,
1
,
ADDR
(
"/rtas"
));
if
(
!
IHANDLE_VALID
(
rtas_inst
))
{
prom_printf
(
"opening rtas package failed"
);
return
;
}
prom_printf
(
"instantiating rtas at 0x%x ..."
,
base
);
if
(
call_prom
(
"call-method"
,
3
,
2
,
ADDR
(
"instantiate-rtas"
),
rtas_inst
,
base
)
!=
PROM_ERROR
)
{
entry
=
(
long
)
_prom
->
args
.
rets
[
1
];
}
if
(
entry
==
0
)
{
prom_printf
(
" failed
\n
"
);
return
;
}
prom_printf
(
" done
\n
"
);
reserve_mem
(
base
,
size
);
prom_setprop
(
rtas_node
,
"linux,rtas-base"
,
&
base
,
sizeof
(
base
));
prom_setprop
(
rtas_node
,
"linux,rtas-entry"
,
&
entry
,
sizeof
(
entry
));
prom_debug
(
"rtas base = 0x%x
\n
"
,
base
);
prom_debug
(
"rtas entry = 0x%x
\n
"
,
entry
);
prom_debug
(
"rtas size = 0x%x
\n
"
,
(
long
)
size
);
prom_debug
(
"prom_instantiate_rtas: end...
\n
"
);
}
/*
* Allocate room for and initialize TCE tables
*/
static
void
__init
prom_initialize_tce_table
(
void
)
{
phandle
node
;
ihandle
phb_node
;
unsigned
long
offset
=
reloc_offset
();
char
compatible
[
64
],
type
[
64
],
model
[
64
];
char
*
path
=
RELOC
(
prom_scratch
);
u64
base
,
align
;
u32
minalign
,
minsize
;
u64
tce_entry
,
*
tce_entryp
;
u64
local_alloc_top
,
local_alloc_bottom
;
u64
i
;
if
(
RELOC
(
ppc64_iommu_off
))
return
;
prom_debug
(
"starting prom_initialize_tce_table
\n
"
);
/* Cache current top of allocs so we reserve a single block */
local_alloc_top
=
RELOC
(
alloc_top_high
);
local_alloc_bottom
=
local_alloc_top
;
/* Search all nodes looking for PHBs. */
for
(
node
=
0
;
prom_next_node
(
&
node
);
)
{
compatible
[
0
]
=
0
;
type
[
0
]
=
0
;
model
[
0
]
=
0
;
prom_getprop
(
node
,
"compatible"
,
compatible
,
sizeof
(
compatible
));
prom_getprop
(
node
,
"device_type"
,
type
,
sizeof
(
type
));
prom_getprop
(
node
,
"model"
,
model
,
sizeof
(
model
));
if
((
type
[
0
]
==
0
)
||
(
strstr
(
type
,
RELOC
(
"pci"
))
==
NULL
))
continue
;
/* Keep the old logic intact to avoid regression. */
if
(
compatible
[
0
]
!=
0
)
{
if
((
strstr
(
compatible
,
RELOC
(
"python"
))
==
NULL
)
&&
(
strstr
(
compatible
,
RELOC
(
"Speedwagon"
))
==
NULL
)
&&
(
strstr
(
compatible
,
RELOC
(
"Winnipeg"
))
==
NULL
))
continue
;
}
else
if
(
model
[
0
]
!=
0
)
{
if
((
strstr
(
model
,
RELOC
(
"ython"
))
==
NULL
)
&&
(
strstr
(
model
,
RELOC
(
"peedwagon"
))
==
NULL
)
&&
(
strstr
(
model
,
RELOC
(
"innipeg"
))
==
NULL
))
continue
;
}
if
(
prom_getprop
(
node
,
"tce-table-minalign"
,
&
minalign
,
sizeof
(
minalign
))
==
PROM_ERROR
)
minalign
=
0
;
if
(
prom_getprop
(
node
,
"tce-table-minsize"
,
&
minsize
,
sizeof
(
minsize
))
==
PROM_ERROR
)
minsize
=
4UL
<<
20
;
/*
* Even though we read what OF wants, we just set the table
* size to 4 MB. This is enough to map 2GB of PCI DMA space.
* By doing this, we avoid the pitfalls of trying to DMA to
* MMIO space and the DMA alias hole.
*
* On POWER4, firmware sets the TCE region by assuming
* each TCE table is 8MB. Using this memory for anything
* else will impact performance, so we always allocate 8MB.
* Anton
*/
if
(
__is_processor
(
PV_POWER4
)
||
__is_processor
(
PV_POWER4p
))
minsize
=
8UL
<<
20
;
else
minsize
=
4UL
<<
20
;
/* Align to the greater of the align or size */
align
=
max
(
minalign
,
minsize
);
base
=
alloc_down
(
minsize
,
align
,
1
);
if
(
base
==
0
)
prom_panic
(
"ERROR, cannot find space for TCE table.
\n
"
);
if
(
base
<
local_alloc_bottom
)
local_alloc_bottom
=
base
;
/* Save away the TCE table attributes for later use. */
prom_setprop
(
node
,
"linux,tce-base"
,
&
base
,
sizeof
(
base
));
prom_setprop
(
node
,
"linux,tce-size"
,
&
minsize
,
sizeof
(
minsize
));
/* It seems OF doesn't null-terminate the path :-( */
memset
(
path
,
0
,
sizeof
(
path
));
/* Call OF to setup the TCE hardware */
if
(
call_prom
(
"package-to-path"
,
3
,
1
,
node
,
path
,
PROM_SCRATCH_SIZE
-
1
)
==
PROM_ERROR
)
{
prom_printf
(
"package-to-path failed
\n
"
);
}
prom_debug
(
"TCE table: %s
\n
"
,
path
);
prom_debug
(
"
\t
node = 0x%x
\n
"
,
node
);
prom_debug
(
"
\t
base = 0x%x
\n
"
,
base
);
prom_debug
(
"
\t
size = 0x%x
\n
"
,
minsize
);
/* Initialize the table to have a one-to-one mapping
* over the allocated size.
*/
tce_entryp
=
(
unsigned
long
*
)
base
;
for
(
i
=
0
;
i
<
(
minsize
>>
3
)
;
tce_entryp
++
,
i
++
)
{
tce_entry
=
(
i
<<
PAGE_SHIFT
);
tce_entry
|=
0x3
;
*
tce_entryp
=
tce_entry
;
}
prom_printf
(
"opening PHB %s"
,
path
);
phb_node
=
call_prom
(
"open"
,
1
,
1
,
path
);
if
(
phb_node
==
0
)
prom_printf
(
"... failed
\n
"
);
else
prom_printf
(
"... done
\n
"
);
call_prom
(
"call-method"
,
6
,
0
,
ADDR
(
"set-64-bit-addressing"
),
phb_node
,
-
1
,
minsize
,
(
u32
)
base
,
(
u32
)
(
base
>>
32
));
call_prom
(
"close"
,
1
,
0
,
phb_node
);
}
reserve_mem
(
local_alloc_bottom
,
local_alloc_top
-
local_alloc_bottom
);
if
(
RELOC
(
prom_memory_limit
))
{
/*
* We align the start to a 16MB boundary so we can map the TCE area
* using large pages if possible. The end should be the top of RAM
* so no need to align it.
*/
RELOC
(
prom_tce_alloc_start
)
=
_ALIGN_DOWN
(
local_alloc_bottom
,
0x1000000
);
RELOC
(
prom_tce_alloc_end
)
=
local_alloc_top
;
}
/* Flag the first invalid entry */
prom_debug
(
"ending prom_initialize_tce_table
\n
"
);
}
/*
* With CHRP SMP we need to use the OF to start the other
* processors so we can't wait until smp_boot_cpus (the OF is
* trashed by then) so we have to put the processors into
* a holding pattern controlled by the kernel (not OF) before
* we destroy the OF.
*
* This uses a chunk of low memory, puts some holding pattern
* code there and sends the other processors off to there until
* smp_boot_cpus tells them to do something. The holding pattern
* checks that address until its cpu # is there, when it is that
* cpu jumps to __secondary_start(). smp_boot_cpus() takes care
* of setting those values.
*
* We also use physical address 0x4 here to tell when a cpu
* is in its holding pattern code.
*
* Fixup comment... DRENG / PPPBBB - Peter
*
* -- Cort
*/
static
void
__init
prom_hold_cpus
(
void
)
{
unsigned
long
i
;
unsigned
int
reg
;
phandle
node
;
unsigned
long
offset
=
reloc_offset
();
char
type
[
64
];
int
cpuid
=
0
;
unsigned
int
interrupt_server
[
MAX_CPU_THREADS
];
unsigned
int
cpu_threads
,
hw_cpu_num
;
int
propsize
;
extern
void
__secondary_hold
(
void
);
extern
unsigned
long
__secondary_hold_spinloop
;
extern
unsigned
long
__secondary_hold_acknowledge
;
unsigned
long
*
spinloop
=
(
void
*
)
virt_to_abs
(
&
__secondary_hold_spinloop
);
unsigned
long
*
acknowledge
=
(
void
*
)
virt_to_abs
(
&
__secondary_hold_acknowledge
);
unsigned
long
secondary_hold
=
virt_to_abs
(
*
PTRRELOC
((
unsigned
long
*
)
__secondary_hold
));
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
prom_debug
(
"prom_hold_cpus: start...
\n
"
);
prom_debug
(
" 1) spinloop = 0x%x
\n
"
,
(
unsigned
long
)
spinloop
);
prom_debug
(
" 1) *spinloop = 0x%x
\n
"
,
*
spinloop
);
prom_debug
(
" 1) acknowledge = 0x%x
\n
"
,
(
unsigned
long
)
acknowledge
);
prom_debug
(
" 1) *acknowledge = 0x%x
\n
"
,
*
acknowledge
);
prom_debug
(
" 1) secondary_hold = 0x%x
\n
"
,
secondary_hold
);
/* Set the common spinloop variable, so all of the secondary cpus
* will block when they are awakened from their OF spinloop.
* This must occur for both SMP and non SMP kernels, since OF will
* be trashed when we move the kernel.
*/
*
spinloop
=
0
;
#ifdef CONFIG_HMT
for
(
i
=
0
;
i
<
NR_CPUS
;
i
++
)
{
RELOC
(
hmt_thread_data
)[
i
].
pir
=
0xdeadbeef
;
}
#endif
/* look for cpus */
for
(
node
=
0
;
prom_next_node
(
&
node
);
)
{
type
[
0
]
=
0
;
prom_getprop
(
node
,
"device_type"
,
type
,
sizeof
(
type
));
if
(
strcmp
(
type
,
RELOC
(
"cpu"
))
!=
0
)
continue
;
/* Skip non-configured cpus. */
if
(
prom_getprop
(
node
,
"status"
,
type
,
sizeof
(
type
))
>
0
)
if
(
strcmp
(
type
,
RELOC
(
"okay"
))
!=
0
)
continue
;
reg
=
-
1
;
prom_getprop
(
node
,
"reg"
,
&
reg
,
sizeof
(
reg
));
prom_debug
(
"
\n
cpuid = 0x%x
\n
"
,
cpuid
);
prom_debug
(
"cpu hw idx = 0x%x
\n
"
,
reg
);
/* Init the acknowledge var which will be reset by
* the secondary cpu when it awakens from its OF
* spinloop.
*/
*
acknowledge
=
(
unsigned
long
)
-
1
;
propsize
=
prom_getprop
(
node
,
"ibm,ppc-interrupt-server#s"
,
&
interrupt_server
,
sizeof
(
interrupt_server
));
if
(
propsize
<
0
)
{
/* no property. old hardware has no SMT */
cpu_threads
=
1
;
interrupt_server
[
0
]
=
reg
;
/* fake it with phys id */
}
else
{
/* We have a threaded processor */
cpu_threads
=
propsize
/
sizeof
(
u32
);
if
(
cpu_threads
>
MAX_CPU_THREADS
)
{
prom_printf
(
"SMT: too many threads!
\n
"
"SMT: found %x, max is %x
\n
"
,
cpu_threads
,
MAX_CPU_THREADS
);
cpu_threads
=
1
;
/* ToDo: panic? */
}
}
hw_cpu_num
=
interrupt_server
[
0
];
if
(
hw_cpu_num
!=
_prom
->
cpu
)
{
/* Primary Thread of non-boot cpu */
prom_printf
(
"%x : starting cpu hw idx %x... "
,
cpuid
,
reg
);
call_prom
(
"start-cpu"
,
3
,
0
,
node
,
secondary_hold
,
reg
);
for
(
i
=
0
;
(
i
<
100000000
)
&&
(
*
acknowledge
==
((
unsigned
long
)
-
1
));
i
++
)
mb
();
if
(
*
acknowledge
==
reg
)
{
prom_printf
(
"done
\n
"
);
/* We have to get every CPU out of OF,
* even if we never start it. */
if
(
cpuid
>=
NR_CPUS
)
goto
next
;
}
else
{
prom_printf
(
"failed: %x
\n
"
,
*
acknowledge
);
}
}
#ifdef CONFIG_SMP
else
prom_printf
(
"%x : boot cpu %x
\n
"
,
cpuid
,
reg
);
#endif
next:
#ifdef CONFIG_SMP
/* Init paca for secondary threads. They start later. */
for
(
i
=
1
;
i
<
cpu_threads
;
i
++
)
{
cpuid
++
;
if
(
cpuid
>=
NR_CPUS
)
continue
;
}
#endif
/* CONFIG_SMP */
cpuid
++
;
}
#ifdef CONFIG_HMT
/* Only enable HMT on processors that provide support. */
if
(
__is_processor
(
PV_PULSAR
)
||
__is_processor
(
PV_ICESTAR
)
||
__is_processor
(
PV_SSTAR
))
{
prom_printf
(
" starting secondary threads
\n
"
);
for
(
i
=
0
;
i
<
NR_CPUS
;
i
+=
2
)
{
if
(
!
cpu_online
(
i
))
continue
;
if
(
i
==
0
)
{
unsigned
long
pir
=
mfspr
(
SPRN_PIR
);
if
(
__is_processor
(
PV_PULSAR
))
{
RELOC
(
hmt_thread_data
)[
i
].
pir
=
pir
&
0x1f
;
}
else
{
RELOC
(
hmt_thread_data
)[
i
].
pir
=
pir
&
0x3ff
;
}
}
}
}
else
{
prom_printf
(
"Processor is not HMT capable
\n
"
);
}
#endif
if
(
cpuid
>
NR_CPUS
)
prom_printf
(
"WARNING: maximum CPUs ("
__stringify
(
NR_CPUS
)
") exceeded: ignoring extras
\n
"
);
prom_debug
(
"prom_hold_cpus: end...
\n
"
);
}
static
void
__init
prom_init_client_services
(
unsigned
long
pp
)
{
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
/* Get a handle to the prom entry point before anything else */
_prom
->
entry
=
pp
;
/* Init default value for phys size */
_prom
->
root_size_cells
=
1
;
_prom
->
root_addr_cells
=
2
;
/* get a handle for the stdout device */
_prom
->
chosen
=
call_prom
(
"finddevice"
,
1
,
1
,
ADDR
(
"/chosen"
));
if
(
!
PHANDLE_VALID
(
_prom
->
chosen
))
prom_panic
(
"cannot find chosen"
);
/* msg won't be printed :( */
/* get device tree root */
_prom
->
root
=
call_prom
(
"finddevice"
,
1
,
1
,
ADDR
(
"/"
));
if
(
!
PHANDLE_VALID
(
_prom
->
root
))
prom_panic
(
"cannot find device tree root"
);
/* msg won't be printed :( */
}
static
void
__init
prom_init_stdout
(
void
)
{
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
char
*
path
=
RELOC
(
of_stdout_device
);
char
type
[
16
];
u32
val
;
if
(
prom_getprop
(
_prom
->
chosen
,
"stdout"
,
&
val
,
sizeof
(
val
))
<=
0
)
prom_panic
(
"cannot find stdout"
);
_prom
->
stdout
=
val
;
/* Get the full OF pathname of the stdout device */
memset
(
path
,
0
,
256
);
call_prom
(
"instance-to-path"
,
3
,
1
,
_prom
->
stdout
,
path
,
255
);
val
=
call_prom
(
"instance-to-package"
,
1
,
1
,
_prom
->
stdout
);
prom_setprop
(
_prom
->
chosen
,
"linux,stdout-package"
,
&
val
,
sizeof
(
val
));
prom_printf
(
"OF stdout device is: %s
\n
"
,
RELOC
(
of_stdout_device
));
prom_setprop
(
_prom
->
chosen
,
"linux,stdout-path"
,
RELOC
(
of_stdout_device
),
strlen
(
RELOC
(
of_stdout_device
))
+
1
);
/* If it's a display, note it */
memset
(
type
,
0
,
sizeof
(
type
));
prom_getprop
(
val
,
"device_type"
,
type
,
sizeof
(
type
));
if
(
strcmp
(
type
,
RELOC
(
"display"
))
==
0
)
{
_prom
->
disp_node
=
val
;
prom_setprop
(
val
,
"linux,boot-display"
,
NULL
,
0
);
}
}
static
void
__init
prom_close_stdin
(
void
)
{
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
ihandle
val
;
if
(
prom_getprop
(
_prom
->
chosen
,
"stdin"
,
&
val
,
sizeof
(
val
))
>
0
)
call_prom
(
"close"
,
1
,
0
,
val
);
}
static
int
__init
prom_find_machine_type
(
void
)
{
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
char
compat
[
256
];
int
len
,
i
=
0
;
phandle
rtas
;
len
=
prom_getprop
(
_prom
->
root
,
"compatible"
,
compat
,
sizeof
(
compat
)
-
1
);
if
(
len
>
0
)
{
compat
[
len
]
=
0
;
while
(
i
<
len
)
{
char
*
p
=
&
compat
[
i
];
int
sl
=
strlen
(
p
);
if
(
sl
==
0
)
break
;
if
(
strstr
(
p
,
RELOC
(
"Power Macintosh"
))
||
strstr
(
p
,
RELOC
(
"MacRISC4"
)))
return
PLATFORM_POWERMAC
;
if
(
strstr
(
p
,
RELOC
(
"Momentum,Maple"
)))
return
PLATFORM_MAPLE
;
i
+=
sl
+
1
;
}
}
/* Default to pSeries. We need to know if we are running LPAR */
rtas
=
call_prom
(
"finddevice"
,
1
,
1
,
ADDR
(
"/rtas"
));
if
(
PHANDLE_VALID
(
rtas
))
{
int
x
=
prom_getproplen
(
rtas
,
"ibm,hypertas-functions"
);
if
(
x
!=
PROM_ERROR
)
{
prom_printf
(
"Hypertas detected, assuming LPAR !
\n
"
);
return
PLATFORM_PSERIES_LPAR
;
}
}
return
PLATFORM_PSERIES
;
}
static
int
__init
prom_set_color
(
ihandle
ih
,
int
i
,
int
r
,
int
g
,
int
b
)
{
unsigned
long
offset
=
reloc_offset
();
return
call_prom
(
"call-method"
,
6
,
1
,
ADDR
(
"color!"
),
ih
,
i
,
b
,
g
,
r
);
}
/*
* If we have a display that we don't know how to drive,
* we will want to try to execute OF's open method for it
 * later. However, OF will probably fall over if we do that after
 * we've taken over the MMU.
* So we check whether we will need to open the display,
* and if so, open it now.
*/
static
void
__init
prom_check_displays
(
void
)
{
unsigned
long
offset
=
reloc_offset
();
struct
prom_t
*
_prom
=
PTRRELOC
(
&
prom
);
char
type
[
16
],
*
path
;
phandle
node
;
ihandle
ih
;
int
i
;
static
unsigned
char
default_colors
[]
=
{
0x00
,
0x00
,
0x00
,
0x00
,
0x00
,
0xaa
,
0x00
,
0xaa
,
0x00
,
0x00
,
0xaa
,
0xaa
,
0xaa
,
0x00
,
0x00
,
0xaa
,
0x00
,
0xaa
,
0xaa
,
0xaa
,
0x00
,
0xaa
,
0xaa
,
0xaa
,
0x55
,
0x55
,
0x55
,
0x55
,
0x55
,
0xff
,
0x55
,
0xff
,
0x55
,
0x55
,
0xff
,
0xff
,
0xff
,
0x55
,
0x55
,
0xff
,
0x55
,
0xff
,
0xff
,
0xff
,
0x55
,
0xff
,
0xff
,
0xff
};
const
unsigned
char
*
clut
;
prom_printf
(
"Looking for displays
\n
"
);
for
(
node
=
0
;
prom_next_node
(
&
node
);
)
{
memset
(
type
,
0
,
sizeof
(
type
));
prom_getprop
(
node
,
"device_type"
,
type
,
sizeof
(
type
));
if
(
strcmp
(
type
,
RELOC
(
"display"
))
!=
0
)
continue
;
/* It seems OF doesn't null-terminate the path :-( */
path
=
RELOC
(
prom_scratch
);
memset
(
path
,
0
,
PROM_SCRATCH_SIZE
);
/*
* leave some room at the end of the path for appending extra
* arguments
*/
if
(
call_prom
(
"package-to-path"
,
3
,
1
,
node
,
path
,
PROM_SCRATCH_SIZE
-
10
)
==
PROM_ERROR
)
continue
;
prom_printf
(
"found display : %s, opening ... "
,
path
);
ih
=
call_prom
(
"open"
,
1
,
1
,
path
);
if
(
ih
==
0
)
{
prom_printf
(
"failed
\n
"
);
continue
;
}
/* Success */
prom_printf
(
"done
\n
"
);
prom_setprop
(
node
,
"linux,opened"
,
NULL
,
0
);
/*
* stdout wasn't a display node, pick the first we can find
* for btext
*/
if
(
_prom
->
disp_node
==
0
)
_prom
->
disp_node
=
node
;
/* Set up a usable color table when the appropriate
* method is available. Should update this to set-colors */
clut
=
RELOC
(
default_colors
);
for
(
i
=
0
;
i
<
32
;
i
++
,
clut
+=
3
)
if
(
prom_set_color
(
ih
,
i
,
clut
[
0
],
clut
[
1
],
clut
[
2
])
!=
0
)
break
;
#ifdef CONFIG_LOGO_LINUX_CLUT224
clut
=
PTRRELOC
(
RELOC
(
logo_linux_clut224
.
clut
));
for
(
i
=
0
;
i
<
RELOC
(
logo_linux_clut224
.
clutsize
);
i
++
,
clut
+=
3
)
if
(
prom_set_color
(
ih
,
i
+
32
,
clut
[
0
],
clut
[
1
],
clut
[
2
])
!=
0
)
break
;
#endif
/* CONFIG_LOGO_LINUX_CLUT224 */
}
}
/* Return (relocated) pointer to this much memory: moves initrd if reqd. */
static void __init *make_room(unsigned long *mem_start, unsigned long *mem_end,
			      unsigned long needed, unsigned long align)
{
	unsigned long offset = reloc_offset();
	void *ret;

	*mem_start = _ALIGN(*mem_start, align);
	while ((*mem_start + needed) > *mem_end) {
		unsigned long room, chunk;

		prom_debug("Chunk exhausted, claiming more at %x...\n",
			   RELOC(alloc_bottom));
		room = RELOC(alloc_top) - RELOC(alloc_bottom);
		if (room > DEVTREE_CHUNK_SIZE)
			room = DEVTREE_CHUNK_SIZE;
		if (room < PAGE_SIZE)
			prom_panic("No memory for flatten_device_tree (no room)");
		chunk = alloc_up(room, 0);
		if (chunk == 0)
			prom_panic("No memory for flatten_device_tree (claim failed)");
		*mem_end = RELOC(alloc_top);
	}

	ret = (void *)*mem_start;
	*mem_start += needed;

	return ret;
}
#define dt_push_token(token, mem_start, mem_end) \
do { *((u32 *)make_room(mem_start, mem_end, 4, 4)) = token; } while(0)
static unsigned long __init dt_find_string(char *str)
{
	unsigned long offset = reloc_offset();
	char *s, *os;

	s = os = (char *)RELOC(dt_string_start);
	s += 4;
	while (s < (char *)RELOC(dt_string_end)) {
		if (strcmp(s, str) == 0)
			return s - os;
		s += strlen(s) + 1;
	}
	return 0;
}
/*
* The Open Firmware 1275 specification states properties must be 31 bytes or
* less, however not all firmwares obey this. Make it 64 bytes to be safe.
*/
#define MAX_PROPERTY_NAME 64
static void __init scan_dt_build_strings(phandle node, unsigned long *mem_start,
					 unsigned long *mem_end)
{
	unsigned long offset = reloc_offset();
	char *prev_name, *namep, *sstart;
	unsigned long soff;
	phandle child;

	sstart = (char *)RELOC(dt_string_start);

	/* get and store all property names */
	prev_name = RELOC("");
	for (;;) {
		/* 64 is max len of name including nul. */
		namep = make_room(mem_start, mem_end, MAX_PROPERTY_NAME, 1);
		if (call_prom("nextprop", 3, 1, node, prev_name, namep) != 1) {
			/* No more nodes: unwind alloc */
			*mem_start = (unsigned long)namep;
			break;
		}

		/* skip "name" */
		if (strcmp(namep, RELOC("name")) == 0) {
			*mem_start = (unsigned long)namep;
			prev_name = RELOC("name");
			continue;
		}

		/* get/create string entry */
		soff = dt_find_string(namep);
		if (soff != 0) {
			*mem_start = (unsigned long)namep;
			namep = sstart + soff;
		} else {
			/* Trim off some if we can */
			*mem_start = (unsigned long)namep + strlen(namep) + 1;
			RELOC(dt_string_end) = *mem_start;
		}
		prev_name = namep;
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_strings(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}
}
static void __init scan_dt_build_struct(phandle node, unsigned long *mem_start,
					unsigned long *mem_end)
{
	phandle child;
	char *namep, *prev_name, *sstart, *p, *ep, *lp, *path;
	unsigned long soff;
	unsigned char *valp;
	unsigned long offset = reloc_offset();
	static char pname[MAX_PROPERTY_NAME];
	int l;

	dt_push_token(OF_DT_BEGIN_NODE, mem_start, mem_end);

	/* get the node's full name */
	namep = (char *)*mem_start;
	l = call_prom("package-to-path", 3, 1, node, namep, *mem_end - *mem_start);
	if (l >= 0) {
		/* Didn't fit? Get more room. */
		if ((l + 1) > (*mem_end - *mem_start)) {
			namep = make_room(mem_start, mem_end, l + 1, 1);
			call_prom("package-to-path", 3, 1, node, namep, l);
		}
		namep[l] = '\0';

		/* Fixup an Apple bug where they have bogus \0 chars in the
		 * middle of the path in some properties
		 */
		for (p = namep, ep = namep + l; p < ep; p++)
			if (*p == '\0') {
				memmove(p, p + 1, ep - p);
				ep--; l--; p--;
			}

		/* now try to extract the unit name in that mess */
		for (p = namep, lp = NULL; *p; p++)
			if (*p == '/')
				lp = p + 1;
		if (lp != NULL)
			memmove(namep, lp, strlen(lp) + 1);
		*mem_start = _ALIGN(((unsigned long)namep) + strlen(namep) + 1, 4);
	}

	/* get it again for debugging */
	path = RELOC(prom_scratch);
	memset(path, 0, PROM_SCRATCH_SIZE);
	call_prom("package-to-path", 3, 1, node, path, PROM_SCRATCH_SIZE - 1);

	/* get and store all properties */
	prev_name = RELOC("");
	sstart = (char *)RELOC(dt_string_start);
	for (;;) {
		if (call_prom("nextprop", 3, 1, node, prev_name, RELOC(pname)) != 1)
			break;

		/* skip "name" */
		if (strcmp(RELOC(pname), RELOC("name")) == 0) {
			prev_name = RELOC("name");
			continue;
		}

		/* find string offset */
		soff = dt_find_string(RELOC(pname));
		if (soff == 0) {
			prom_printf("WARNING: Can't find string index for"
				    " <%s>, node %s\n", RELOC(pname), path);
			break;
		}
		prev_name = sstart + soff;

		/* get length */
		l = call_prom("getproplen", 2, 1, node, RELOC(pname));

		/* sanity checks */
		if (l == PROM_ERROR)
			continue;
		if (l > MAX_PROPERTY_LENGTH) {
			prom_printf("WARNING: ignoring large property ");
			/* It seems OF doesn't null-terminate the path :-( */
			prom_printf("[%s] ", path);
			prom_printf("%s length 0x%x\n", RELOC(pname), l);
			continue;
		}

		/* push property head */
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(l, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);

		/* push property content */
		valp = make_room(mem_start, mem_end, l, 4);
		call_prom("getprop", 4, 1, node, RELOC(pname), valp, l);
		*mem_start = _ALIGN(*mem_start, 4);
	}

	/* Add a "linux,phandle" property. */
	soff = dt_find_string(RELOC("linux,phandle"));
	if (soff == 0)
		prom_printf("WARNING: Can't find string index for"
			    " <linux-phandle> node %s\n", path);
	else {
		dt_push_token(OF_DT_PROP, mem_start, mem_end);
		dt_push_token(4, mem_start, mem_end);
		dt_push_token(soff, mem_start, mem_end);
		valp = make_room(mem_start, mem_end, 4, 4);
		*(u32 *)valp = node;
	}

	/* do all our children */
	child = call_prom("child", 1, 1, node);
	while (child != 0) {
		scan_dt_build_struct(child, mem_start, mem_end);
		child = call_prom("peer", 1, 1, child);
	}

	dt_push_token(OF_DT_END_NODE, mem_start, mem_end);
}
static void __init flatten_device_tree(void)
{
	phandle root;
	unsigned long offset = reloc_offset();
	unsigned long mem_start, mem_end, room;
	struct boot_param_header *hdr;
	struct prom_t *_prom = PTRRELOC(&prom);
	char *namep;
	u64 *rsvmap;

	/*
	 * Check how much room we have between alloc top & bottom (+/- a
	 * few pages), crop to 4Mb, as this is our "chunk" size
	 */
	room = RELOC(alloc_top) - RELOC(alloc_bottom) - 0x4000;
	if (room > DEVTREE_CHUNK_SIZE)
		room = DEVTREE_CHUNK_SIZE;
	prom_debug("starting device tree allocs at %x\n", RELOC(alloc_bottom));

	/* Now try to claim that */
	mem_start = (unsigned long)alloc_up(room, PAGE_SIZE);
	if (mem_start == 0)
		prom_panic("Can't allocate initial device-tree chunk\n");
	mem_end = RELOC(alloc_top);

	/* Get root of tree */
	root = call_prom("peer", 1, 1, (phandle)0);
	if (root == (phandle)0)
		prom_panic("couldn't get device tree root\n");

	/* Build header and make room for mem rsv map */
	mem_start = _ALIGN(mem_start, 4);
	hdr = make_room(&mem_start, &mem_end, sizeof(struct boot_param_header), 4);
	RELOC(dt_header_start) = (unsigned long)hdr;
	rsvmap = make_room(&mem_start, &mem_end, sizeof(mem_reserve_map), 8);

	/* Start of strings */
	mem_start = PAGE_ALIGN(mem_start);
	RELOC(dt_string_start) = mem_start;
	mem_start += 4; /* hole */

	/* Add "linux,phandle" in there, we'll need it */
	namep = make_room(&mem_start, &mem_end, 16, 1);
	strcpy(namep, RELOC("linux,phandle"));
	mem_start = (unsigned long)namep + strlen(namep) + 1;

	/* Build string array */
	prom_printf("Building dt strings...\n");
	scan_dt_build_strings(root, &mem_start, &mem_end);
	RELOC(dt_string_end) = mem_start;

	/* Build structure */
	mem_start = PAGE_ALIGN(mem_start);
	RELOC(dt_struct_start) = mem_start;
	prom_printf("Building dt structure...\n");
	scan_dt_build_struct(root, &mem_start, &mem_end);
	dt_push_token(OF_DT_END, &mem_start, &mem_end);
	RELOC(dt_struct_end) = PAGE_ALIGN(mem_start);

	/* Finish header */
	hdr->boot_cpuid_phys = _prom->cpu;
	hdr->magic = OF_DT_HEADER;
	hdr->totalsize = RELOC(dt_struct_end) - RELOC(dt_header_start);
	hdr->off_dt_struct = RELOC(dt_struct_start) - RELOC(dt_header_start);
	hdr->off_dt_strings = RELOC(dt_string_start) - RELOC(dt_header_start);
	hdr->dt_strings_size = RELOC(dt_string_end) - RELOC(dt_string_start);
	hdr->off_mem_rsvmap = ((unsigned long)rsvmap) - RELOC(dt_header_start);
	hdr->version = OF_DT_VERSION;
	/* Version 16 is not backward compatible */
	hdr->last_comp_version = 0x10;

	/* Reserve the whole thing and copy the reserve map in, we
	 * also bump mem_reserve_cnt to cause further reservations to
	 * fail since it's too late.
	 */
	reserve_mem(RELOC(dt_header_start), hdr->totalsize);
	memcpy(rsvmap, RELOC(mem_reserve_map), sizeof(mem_reserve_map));

#ifdef DEBUG_PROM
	{
		int i;
		prom_printf("reserved memory map:\n");
		for (i = 0; i < RELOC(mem_reserve_cnt); i++)
			prom_printf(" %x - %x\n",
				    RELOC(mem_reserve_map)[i].base,
				    RELOC(mem_reserve_map)[i].size);
	}
#endif
	RELOC(mem_reserve_cnt) = MEM_RESERVE_MAP_SIZE;

	prom_printf("Device tree strings 0x%x -> 0x%x\n",
		    RELOC(dt_string_start), RELOC(dt_string_end));
	prom_printf("Device tree struct 0x%x -> 0x%x\n",
		    RELOC(dt_struct_start), RELOC(dt_struct_end));
}
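The blob built by flatten_device_tree() is what the kernel later parses during early boot. As a rough, stand-alone illustration of the layout implied by the code above, here is a minimal walker; the struct name, token values (OF_DT_BEGIN_NODE = 1, OF_DT_END_NODE = 2, OF_DT_PROP = 3, OF_DT_END = 9) and big-endian cells are assumptions based on the usual flattened-tree format of that era, not something taken from this commit.

/*
 * Illustration only: walk a flattened device-tree blob in user space.
 * 'struct fdt_header' and walk_flat_tree() are hypothetical names; the
 * field order mirrors the header fields filled in above.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <arpa/inet.h>		/* ntohl() */

struct fdt_header {
	uint32_t magic, totalsize, off_dt_struct, off_dt_strings;
	uint32_t off_mem_rsvmap, version, last_comp_version, boot_cpuid_phys;
};

static void walk_flat_tree(const void *blob)
{
	const struct fdt_header *hdr = blob;
	const char *strings = (const char *)blob + ntohl(hdr->off_dt_strings);
	const uint32_t *p =
		(const void *)((const char *)blob + ntohl(hdr->off_dt_struct));
	int depth = 0;

	for (;;) {
		uint32_t tok = ntohl(*p++);

		if (tok == 1) {			/* OF_DT_BEGIN_NODE: inline, NUL-terminated name */
			const char *name = (const char *)p;
			printf("%*s%s\n", 2 * depth++, "", *name ? name : "/");
			p = (const void *)(((uintptr_t)name + strlen(name) + 1 + 3) & ~3UL);
		} else if (tok == 2) {		/* OF_DT_END_NODE */
			depth--;
		} else if (tok == 3) {		/* OF_DT_PROP: length, name offset, value */
			uint32_t len = ntohl(*p++), noff = ntohl(*p++);
			printf("%*s.%s (%u bytes)\n", 2 * depth, "", strings + noff, len);
			p = (const void *)(((uintptr_t)p + len + 3) & ~3UL);
		} else if (tok == 9) {		/* OF_DT_END */
			break;
		}
	}
}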
static void __init fixup_device_tree(void)
{
	unsigned long offset = reloc_offset();
	phandle u3, i2c, mpic;
	u32 u3_rev;
	u32 interrupts[2];
	u32 parent;

	/* Some G5s have a missing interrupt definition, fix it up here */
	u3 = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000"));
	if (!PHANDLE_VALID(u3))
		return;
	i2c = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/i2c@f8001000"));
	if (!PHANDLE_VALID(i2c))
		return;
	mpic = call_prom("finddevice", 1, 1, ADDR("/u3@0,f8000000/mpic@f8040000"));
	if (!PHANDLE_VALID(mpic))
		return;

	/* check if proper rev of u3 */
	if (prom_getprop(u3, "device-rev", &u3_rev, sizeof(u3_rev)) == PROM_ERROR)
		return;
	if (u3_rev < 0x35 || u3_rev > 0x39)
		return;
	/* does it need fixup ? */
	if (prom_getproplen(i2c, "interrupts") > 0)
		return;

	prom_printf("fixing up bogus interrupts for u3 i2c...\n");

	/* interrupt on this revision of u3 is number 0 and level */
	interrupts[0] = 0;
	interrupts[1] = 1;
	prom_setprop(i2c, "interrupts", &interrupts, sizeof(interrupts));
	parent = (u32)mpic;
	prom_setprop(i2c, "interrupt-parent", &parent, sizeof(parent));
}
static void __init prom_find_boot_cpu(void)
{
	unsigned long offset = reloc_offset();
	struct prom_t *_prom = PTRRELOC(&prom);
	u32 getprop_rval;
	ihandle prom_cpu;
	phandle cpu_pkg;

	if (prom_getprop(_prom->chosen, "cpu", &prom_cpu, sizeof(prom_cpu)) <= 0)
		prom_panic("cannot find boot cpu");

	cpu_pkg = call_prom("instance-to-package", 1, 1, prom_cpu);

	prom_getprop(cpu_pkg, "reg", &getprop_rval, sizeof(getprop_rval));
	_prom->cpu = getprop_rval;

	prom_debug("Booting CPU hw index = 0x%x\n", _prom->cpu);
}
static void __init prom_check_initrd(unsigned long r3, unsigned long r4)
{
#ifdef CONFIG_BLK_DEV_INITRD
	unsigned long offset = reloc_offset();
	struct prom_t *_prom = PTRRELOC(&prom);

	if (r3 && r4 && r4 != 0xdeadbeef) {
		u64 val;

		RELOC(prom_initrd_start) = (r3 >= KERNELBASE) ? __pa(r3) : r3;
		RELOC(prom_initrd_end) = RELOC(prom_initrd_start) + r4;

		val = (u64)RELOC(prom_initrd_start);
		prom_setprop(_prom->chosen, "linux,initrd-start", &val, sizeof(val));
		val = (u64)RELOC(prom_initrd_end);
		prom_setprop(_prom->chosen, "linux,initrd-end", &val, sizeof(val));

		reserve_mem(RELOC(prom_initrd_start),
			    RELOC(prom_initrd_end) - RELOC(prom_initrd_start));

		prom_debug("initrd_start=0x%x\n", RELOC(prom_initrd_start));
		prom_debug("initrd_end=0x%x\n", RELOC(prom_initrd_end));
	}
#endif /* CONFIG_BLK_DEV_INITRD */
}
/*
 * We enter here early on, when the Open Firmware prom is still
 * handling exceptions and the MMU hash table for us.
 */
unsigned long __init prom_init(unsigned long r3, unsigned long r4, unsigned long pp,
			       unsigned long r6, unsigned long r7)
{
	unsigned long offset = reloc_offset();
	struct prom_t *_prom = PTRRELOC(&prom);
	unsigned long phys = KERNELBASE - offset;
	u32 getprop_rval;

	/*
	 * First zero the BSS
	 */
	memset(PTRRELOC(&__bss_start), 0, __bss_stop - __bss_start);

	/*
	 * Init interface to Open Firmware, get some node references,
	 * like /chosen
	 */
	prom_init_client_services(pp);

	/*
	 * Init prom stdout device
	 */
	prom_init_stdout();
	prom_debug("klimit=0x%x\n", RELOC(klimit));
	prom_debug("offset=0x%x\n", offset);

	/*
	 * Check for an initrd
	 */
	prom_check_initrd(r3, r4);

	/*
	 * Get default machine type. At this point, we do not differentiate
	 * between pSeries SMP and pSeries LPAR
	 */
	RELOC(of_platform) = prom_find_machine_type();
	getprop_rval = RELOC(of_platform);
	prom_setprop(_prom->chosen, "linux,platform",
		     &getprop_rval, sizeof(getprop_rval));

	/*
	 * On pSeries, inform the firmware about our capabilities
	 */
	if (RELOC(of_platform) == PLATFORM_PSERIES ||
	    RELOC(of_platform) == PLATFORM_PSERIES_LPAR)
		prom_send_capabilities();

	/*
	 * On pSeries and Cell, copy the CPU hold code
	 */
	if (RELOC(of_platform) & (PLATFORM_PSERIES | PLATFORM_CELL))
		copy_and_flush(0, KERNELBASE - offset, 0x100, 0);

	/*
	 * Get memory cells format
	 */
	getprop_rval = 1;
	prom_getprop(_prom->root, "#size-cells",
		     &getprop_rval, sizeof(getprop_rval));
	_prom->root_size_cells = getprop_rval;
	getprop_rval = 2;
	prom_getprop(_prom->root, "#address-cells",
		     &getprop_rval, sizeof(getprop_rval));
	_prom->root_addr_cells = getprop_rval;

	/*
	 * Do early parsing of command line
	 */
	early_cmdline_parse();

	/*
	 * Initialize memory management within prom_init
	 */
	prom_init_mem();

	/*
	 * Determine which cpu is actually running right _now_
	 */
	prom_find_boot_cpu();

	/*
	 * Initialize display devices
	 */
	prom_check_displays();

	/*
	 * Initialize IOMMU (TCE tables) on pSeries. Do that before anything else
	 * that uses the allocator, we need to make sure we get the top of memory
	 * available for us here...
	 */
	if (RELOC(of_platform) == PLATFORM_PSERIES)
		prom_initialize_tce_table();

	/*
	 * On non-powermacs, try to instantiate RTAS and puts all CPUs
	 * in spin-loops. PowerMacs don't have a working RTAS and use
	 * a different way to spin CPUs
	 */
	if (RELOC(of_platform) != PLATFORM_POWERMAC) {
		prom_instantiate_rtas();
		prom_hold_cpus();
	}

	/*
	 * Fill in some infos for use by the kernel later on
	 */
	if (RELOC(ppc64_iommu_off))
		prom_setprop(_prom->chosen, "linux,iommu-off", NULL, 0);

	if (RELOC(iommu_force_on))
		prom_setprop(_prom->chosen, "linux,iommu-force-on", NULL, 0);

	if (RELOC(prom_memory_limit))
		prom_setprop(_prom->chosen, "linux,memory-limit",
			     PTRRELOC(&prom_memory_limit),
			     sizeof(RELOC(prom_memory_limit)));

	if (RELOC(prom_tce_alloc_start)) {
		prom_setprop(_prom->chosen, "linux,tce-alloc-start",
			     PTRRELOC(&prom_tce_alloc_start),
			     sizeof(RELOC(prom_tce_alloc_start)));
		prom_setprop(_prom->chosen, "linux,tce-alloc-end",
			     PTRRELOC(&prom_tce_alloc_end),
			     sizeof(RELOC(prom_tce_alloc_end)));
	}

	/*
	 * Fixup any known bugs in the device-tree
	 */
	fixup_device_tree();

	/*
	 * Now finally create the flattened device-tree
	 */
	prom_printf("copying OF device tree ...\n");
	flatten_device_tree();

	/* in case stdin is USB and still active on IBM machines... */
	prom_close_stdin();

	/*
	 * Call OF "quiesce" method to shut down pending DMA's from
	 * devices etc...
	 */
	prom_printf("Calling quiesce ...\n");
	call_prom("quiesce", 0, 0);

	/*
	 * And finally, call the kernel passing it the flattened device
	 * tree and NULL as r5, thus triggering the new entry point which
	 * is common to us and kexec
	 */
	prom_printf("returning from prom_init\n");
	prom_debug("->dt_header_start=0x%x\n", RELOC(dt_header_start));
	prom_debug("->phys=0x%x\n", phys);
	__start(RELOC(dt_header_start), phys, 0);

	return 0;
}
arch/ppc64/kernel/rtc.c  deleted 100644 → 0
/*
 * Real Time Clock interface for PPC64.
 *
 * Based on rtc.c by Paul Gortmaker
 *
 * This driver allows use of the real time clock
 * from user space. It exports the /dev/rtc
 * interface supporting various ioctl() and also the
 * /proc/driver/rtc pseudo-file for status information.
 *
 * Interface does not support RTC interrupts nor an alarm.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * 1.0	Mike Corrigan:    IBM iSeries rtc support
 * 1.1	Dave Engebretsen: IBM pSeries rtc support
 */

#define RTC_VERSION "1.1"

#include <linux/config.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/miscdevice.h>
#include <linux/ioport.h>
#include <linux/fcntl.h>
#include <linux/mc146818rtc.h>
#include <linux/init.h>
#include <linux/poll.h>
#include <linux/proc_fs.h>
#include <linux/spinlock.h>
#include <linux/bcd.h>
#include <linux/interrupt.h>
#include <linux/delay.h>

#include <asm/io.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/time.h>
#include <asm/rtas.h>
#include <asm/machdep.h>

/*
 * We sponge a minor off of the misc major. No need slurping
 * up another valuable major dev number for this. If you add
 * an ioctl, make sure you don't conflict with SPARC's RTC
 * ioctls.
 */

static ssize_t rtc_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos);
static int rtc_ioctl(struct inode *inode, struct file *file,
		     unsigned int cmd, unsigned long arg);
static int rtc_read_proc(char *page, char **start, off_t off,
			 int count, int *eof, void *data);

/*
 * If this driver ever becomes modularised, it will be really nice
 * to make the epoch retain its value across module reload...
 */
static unsigned long epoch = 1900;	/* year corresponding to 0x00 */

static const unsigned char days_in_mo[] =
	{ 0, 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 };

/*
 * Now all the various file operations that we export.
 */
static ssize_t rtc_read(struct file *file, char __user *buf,
			size_t count, loff_t *ppos)
{
	return -EIO;
}

static int rtc_ioctl(struct inode *inode, struct file *file, unsigned int cmd,
		     unsigned long arg)
{
	struct rtc_time wtime;

	switch (cmd) {
	case RTC_RD_TIME:	/* Read the time/date from RTC */
	{
		memset(&wtime, 0, sizeof(struct rtc_time));
		ppc_md.get_rtc_time(&wtime);
		break;
	}
	case RTC_SET_TIME:	/* Set the RTC */
	{
		struct rtc_time rtc_tm;
		unsigned char mon, day, hrs, min, sec, leap_yr;
		unsigned int yrs;

		if (!capable(CAP_SYS_TIME))
			return -EACCES;

		if (copy_from_user(&rtc_tm, (struct rtc_time __user *)arg,
				   sizeof(struct rtc_time)))
			return -EFAULT;

		yrs = rtc_tm.tm_year;
		mon = rtc_tm.tm_mon + 1;	/* tm_mon starts at zero */
		day = rtc_tm.tm_mday;
		hrs = rtc_tm.tm_hour;
		min = rtc_tm.tm_min;
		sec = rtc_tm.tm_sec;

		if (yrs < 70)
			return -EINVAL;

		leap_yr = ((!(yrs % 4) && (yrs % 100)) || !(yrs % 400));

		if ((mon > 12) || (day == 0))
			return -EINVAL;

		if (day > (days_in_mo[mon] + ((mon == 2) && leap_yr)))
			return -EINVAL;

		if ((hrs >= 24) || (min >= 60) || (sec >= 60))
			return -EINVAL;

		if (yrs > 169)
			return -EINVAL;

		ppc_md.set_rtc_time(&rtc_tm);
		return 0;
	}
	case RTC_EPOCH_READ:	/* Read the epoch. */
	{
		return put_user(epoch, (unsigned long __user *)arg);
	}
	case RTC_EPOCH_SET:	/* Set the epoch. */
	{
		/*
		 * There were no RTC clocks before 1900.
		 */
		if (arg < 1900)
			return -EINVAL;
		if (!capable(CAP_SYS_TIME))
			return -EACCES;
		epoch = arg;
		return 0;
	}
	default:
		return -EINVAL;
	}
	return copy_to_user((void __user *)arg, &wtime, sizeof wtime) ? -EFAULT : 0;
}

static int rtc_open(struct inode *inode, struct file *file)
{
	nonseekable_open(inode, file);
	return 0;
}

static int rtc_release(struct inode *inode, struct file *file)
{
	return 0;
}

/*
 * The various file operations we support.
 */
static struct file_operations rtc_fops = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= rtc_read,
	.ioctl		= rtc_ioctl,
	.open		= rtc_open,
	.release	= rtc_release,
};

static struct miscdevice rtc_dev = {
	.minor	= RTC_MINOR,
	.name	= "rtc",
	.fops	= &rtc_fops
};

static int __init rtc_init(void)
{
	int retval;

	retval = misc_register(&rtc_dev);
	if (retval < 0)
		return retval;

#ifdef CONFIG_PROC_FS
	if (create_proc_read_entry("driver/rtc", 0, NULL, rtc_read_proc, NULL)
	    == NULL) {
		misc_deregister(&rtc_dev);
		return -ENOMEM;
	}
#endif

	printk(KERN_INFO "i/pSeries Real Time Clock Driver v" RTC_VERSION "\n");
	return 0;
}

static void __exit rtc_exit(void)
{
	remove_proc_entry("driver/rtc", NULL);
	misc_deregister(&rtc_dev);
}

module_init(rtc_init);
module_exit(rtc_exit);

/*
 * Info exported via "/proc/driver/rtc".
 */
static int rtc_proc_output(char *buf)
{
	char *p;
	struct rtc_time tm;

	p = buf;

	ppc_md.get_rtc_time(&tm);

	/*
	 * There is no way to tell if the luser has the RTC set for local
	 * time or for Universal Standard Time (GMT). Probably local though.
	 */
	p += sprintf(p,
		     "rtc_time\t: %02d:%02d:%02d\n"
		     "rtc_date\t: %04d-%02d-%02d\n"
		     "rtc_epoch\t: %04lu\n",
		     tm.tm_hour, tm.tm_min, tm.tm_sec,
		     tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday, epoch);

	p += sprintf(p,
		     "DST_enable\t: no\n"
		     "BCD\t\t: yes\n"
		     "24hr\t\t: yes\n");

	return p - buf;
}

static int rtc_read_proc(char *page, char **start, off_t off,
			 int count, int *eof, void *data)
{
	int len = rtc_proc_output(page);

	if (len <= off + count)
		*eof = 1;
	*start = page + off;
	len -= off;
	if (len > count)
		len = count;
	if (len < 0)
		len = 0;
	return len;
}

#ifdef CONFIG_PPC_RTAS
#define MAX_RTC_WAIT 5000	/* 5 sec */
#define RTAS_CLOCK_BUSY (-2)

unsigned long rtas_get_boot_time(void)
{
	int ret[8];
	int error, wait_time;
	unsigned long max_wait_tb;

	max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
		if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
			wait_time = rtas_extended_busy_delay_time(error);
			/* This is boot time so we spin. */
			udelay(wait_time * 1000);
			error = RTAS_CLOCK_BUSY;
		}
	} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));

	if (error != 0 && printk_ratelimit()) {
		printk(KERN_WARNING "error: reading the clock failed (%d)\n", error);
		return 0;
	}

	return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}

/* NOTE: get_rtc_time will get an error if executed in interrupt context
 * and if a delay is needed to read the clock. In this case we just
 * silently return without updating rtc_tm.
 */
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
	int ret[8];
	int error, wait_time;
	unsigned long max_wait_tb;

	max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);
		if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
			if (in_interrupt() && printk_ratelimit()) {
				printk(KERN_WARNING "error: reading clock would delay interrupt\n");
				return;	/* delay not allowed */
			}
			wait_time = rtas_extended_busy_delay_time(error);
			msleep_interruptible(wait_time);
			error = RTAS_CLOCK_BUSY;
		}
	} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));

	if (error != 0 && printk_ratelimit()) {
		printk(KERN_WARNING "error: reading the clock failed (%d)\n", error);
		return;
	}

	rtc_tm->tm_sec = ret[5];
	rtc_tm->tm_min = ret[4];
	rtc_tm->tm_hour = ret[3];
	rtc_tm->tm_mday = ret[2];
	rtc_tm->tm_mon = ret[1] - 1;
	rtc_tm->tm_year = ret[0] - 1900;
}

int rtas_set_rtc_time(struct rtc_time *tm)
{
	int error, wait_time;
	unsigned long max_wait_tb;

	max_wait_tb = __get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
				  tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday,
				  tm->tm_hour, tm->tm_min, tm->tm_sec, 0);
		if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
			if (in_interrupt())
				return 1;	/* probably decrementer */
			wait_time = rtas_extended_busy_delay_time(error);
			msleep_interruptible(wait_time);
			error = RTAS_CLOCK_BUSY;
		}
	} while (error == RTAS_CLOCK_BUSY && (__get_tb() < max_wait_tb));

	if (error != 0 && printk_ratelimit())
		printk(KERN_WARNING "error: setting the clock failed (%d)\n", error);

	return 0;
}
#endif
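The three RTAS clock routines above all share one retry pattern: rtas_call() can return RTAS_CLOCK_BUSY or an "extended delay" status, in which case the caller waits the hinted time and retries until a deadline measured in timebase ticks. As a hedged sketch (not part of this commit; rtas_clock_call_retry is a hypothetical helper), the common core could be factored like this:

/*
 * Hypothetical helper, for illustration only: retry an RTAS clock call
 * until it stops reporting busy/extended-delay or the deadline passes.
 * Only usable from sleepable context (it calls msleep_interruptible()).
 */
static int rtas_clock_call_retry(int token, int nargs, int nret, int *ret,
				 unsigned long max_wait_tb)
{
	int error;

	do {
		error = rtas_call(token, nargs, nret, ret);
		if (error == RTAS_CLOCK_BUSY || rtas_is_extended_busy(error)) {
			msleep_interruptible(rtas_extended_busy_delay_time(error));
			error = RTAS_CLOCK_BUSY;
		}
	} while (error == RTAS_CLOCK_BUSY && __get_tb() < max_wait_tb);

	return error;
}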
arch/ppc64/kernel/semaphore.c  deleted 100644 → 0
/*
 * PowerPC-specific semaphore code.
 *
 * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 *
 * April 2001 - Reworked by Paul Mackerras <paulus@samba.org>
 * to eliminate the SMP races in the old version between the updates
 * of `count' and `waking'.  Now we use negative `count' values to
 * indicate that some process(es) are waiting for the semaphore.
 */

#include <linux/sched.h>
#include <linux/init.h>
#include <linux/module.h>

#include <asm/atomic.h>
#include <asm/semaphore.h>
#include <asm/errno.h>

/*
 * Atomically update sem->count.
 * This does the equivalent of the following:
 *
 *	old_count = sem->count;
 *	tmp = MAX(old_count, 0) + incr;
 *	sem->count = tmp;
 *	return old_count;
 */
static inline int __sem_update_count(struct semaphore *sem, int incr)
{
	int old_count, tmp;

	__asm__ __volatile__("\n"
"1:	lwarx	%0,0,%3\n"
"	srawi	%1,%0,31\n"
"	andc	%1,%0,%1\n"
"	add	%1,%1,%4\n"
"	stwcx.	%1,0,%3\n"
"	bne	1b"
	: "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
	: "r" (&sem->count), "r" (incr), "m" (sem->count)
	: "cc");

	return old_count;
}

void __up(struct semaphore *sem)
{
	/*
	 * Note that we incremented count in up() before we came here,
	 * but that was ineffective since the result was <= 0, and
	 * any negative value of count is equivalent to 0.
	 * This ends up setting count to 1, unless count is now > 0
	 * (i.e. because some other cpu has called up() in the meantime),
	 * in which case we just increment count.
	 */
	__sem_update_count(sem, 1);
	wake_up(&sem->wait);
}
EXPORT_SYMBOL(__up);

/*
 * Note that when we come in to __down or __down_interruptible,
 * we have already decremented count, but that decrement was
 * ineffective since the result was < 0, and any negative value
 * of count is equivalent to 0.
 * Thus it is only when we decrement count from some value > 0
 * that we have actually got the semaphore.
 */
void __sched __down(struct semaphore *sem)
{
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	/*
	 * Try to get the semaphore.  If the count is > 0, then we've
	 * got the semaphore; we decrement count and exit the loop.
	 * If the count is 0 or negative, we set it to -1, indicating
	 * that we are asleep, and then sleep.
	 */
	while (__sem_update_count(sem, -1) <= 0) {
		schedule();
		set_task_state(tsk, TASK_UNINTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	/*
	 * If there are any more sleepers, wake one of them up so
	 * that it can either get the semaphore, or set count to -1
	 * indicating that there are still processes sleeping.
	 */
	wake_up(&sem->wait);
}
EXPORT_SYMBOL(__down);

int __sched __down_interruptible(struct semaphore *sem)
{
	int retval = 0;
	struct task_struct *tsk = current;
	DECLARE_WAITQUEUE(wait, tsk);

	__set_task_state(tsk, TASK_INTERRUPTIBLE);
	add_wait_queue_exclusive(&sem->wait, &wait);

	while (__sem_update_count(sem, -1) <= 0) {
		if (signal_pending(current)) {
			/*
			 * A signal is pending - give up trying.
			 * Set sem->count to 0 if it is negative,
			 * since we are no longer sleeping.
			 */
			__sem_update_count(sem, 0);
			retval = -EINTR;
			break;
		}
		schedule();
		set_task_state(tsk, TASK_INTERRUPTIBLE);
	}
	remove_wait_queue(&sem->wait, &wait);
	__set_task_state(tsk, TASK_RUNNING);

	wake_up(&sem->wait);
	return retval;
}
EXPORT_SYMBOL(__down_interruptible);
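For readers not fluent in PowerPC assembly, the lwarx/stwcx. loop in __sem_update_count() above computes exactly what its comment describes. A plain-C reference version (illustration only, ignoring atomicity; sem_update_count_reference is a made-up name, not part of the original file) looks like this:

/*
 * Illustration only: the srawi/andc pair clamps a negative count to
 * zero before the increment is applied, and the old value is returned.
 */
static int sem_update_count_reference(int *count, int incr)
{
	int old_count = *count;
	int clamped = old_count < 0 ? 0 : old_count;

	*count = clamped + incr;
	return old_count;
}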
arch/ppc64/kernel/vdso.c  deleted 100644 → 0
/*
* linux/arch/ppc64/kernel/vdso.c
*
* Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
* <benh@kernel.crashing.org>
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/smp_lock.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/user.h>
#include <linux/elf.h>
#include <linux/security.h>
#include <linux/bootmem.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
#include <asm/mmu.h>
#include <asm/mmu_context.h>
#include <asm/machdep.h>
#include <asm/cputable.h>
#include <asm/sections.h>
#include <asm/systemcfg.h>
#include <asm/vdso.h>
#undef DEBUG
#ifdef DEBUG
#define DBG(fmt...) printk(fmt)
#else
#define DBG(fmt...)
#endif
/*
* The vDSOs themselves are here
*/
extern
char
vdso64_start
,
vdso64_end
;
extern
char
vdso32_start
,
vdso32_end
;
static
void
*
vdso64_kbase
=
&
vdso64_start
;
static
void
*
vdso32_kbase
=
&
vdso32_start
;
unsigned
int
vdso64_pages
;
unsigned
int
vdso32_pages
;
/* Signal trampolines user addresses */
unsigned
long
vdso64_rt_sigtramp
;
unsigned
long
vdso32_sigtramp
;
unsigned
long
vdso32_rt_sigtramp
;
/* Format of the patch table */
struct
vdso_patch_def
{
u32
pvr_mask
,
pvr_value
;
const
char
*
gen_name
;
const
char
*
fix_name
;
};
/* Table of functions to patch based on the CPU type/revision
*
* TODO: Improve by adding whole lists for each entry
*/
static
struct
vdso_patch_def
vdso_patches
[]
=
{
{
0xffff0000
,
0x003a0000
,
/* POWER5 */
"__kernel_sync_dicache"
,
"__kernel_sync_dicache_p5"
},
{
0xffff0000
,
0x003b0000
,
/* POWER5 */
"__kernel_sync_dicache"
,
"__kernel_sync_dicache_p5"
},
};
/*
* Some infos carried around for each of them during parsing at
* boot time.
*/
struct
lib32_elfinfo
{
Elf32_Ehdr
*
hdr
;
/* ptr to ELF */
Elf32_Sym
*
dynsym
;
/* ptr to .dynsym section */
unsigned
long
dynsymsize
;
/* size of .dynsym section */
char
*
dynstr
;
/* ptr to .dynstr section */
unsigned
long
text
;
/* offset of .text section in .so */
};
struct
lib64_elfinfo
{
Elf64_Ehdr
*
hdr
;
Elf64_Sym
*
dynsym
;
unsigned
long
dynsymsize
;
char
*
dynstr
;
unsigned
long
text
;
};
#ifdef __DEBUG
static
void
dump_one_vdso_page
(
struct
page
*
pg
,
struct
page
*
upg
)
{
printk
(
"kpg: %p (c:%d,f:%08lx)"
,
__va
(
page_to_pfn
(
pg
)
<<
PAGE_SHIFT
),
page_count
(
pg
),
pg
->
flags
);
if
(
upg
/* && pg != upg*/
)
{
printk
(
" upg: %p (c:%d,f:%08lx)"
,
__va
(
page_to_pfn
(
upg
)
<<
PAGE_SHIFT
),
page_count
(
upg
),
upg
->
flags
);
}
printk
(
"
\n
"
);
}
static
void
dump_vdso_pages
(
struct
vm_area_struct
*
vma
)
{
int
i
;
if
(
!
vma
||
test_thread_flag
(
TIF_32BIT
))
{
printk
(
"vDSO32 @ %016lx:
\n
"
,
(
unsigned
long
)
vdso32_kbase
);
for
(
i
=
0
;
i
<
vdso32_pages
;
i
++
)
{
struct
page
*
pg
=
virt_to_page
(
vdso32_kbase
+
i
*
PAGE_SIZE
);
struct
page
*
upg
=
(
vma
&&
vma
->
vm_mm
)
?
follow_page
(
vma
->
vm_mm
,
vma
->
vm_start
+
i
*
PAGE_SIZE
,
0
)
:
NULL
;
dump_one_vdso_page
(
pg
,
upg
);
}
}
if
(
!
vma
||
!
test_thread_flag
(
TIF_32BIT
))
{
printk
(
"vDSO64 @ %016lx:
\n
"
,
(
unsigned
long
)
vdso64_kbase
);
for
(
i
=
0
;
i
<
vdso64_pages
;
i
++
)
{
struct
page
*
pg
=
virt_to_page
(
vdso64_kbase
+
i
*
PAGE_SIZE
);
struct
page
*
upg
=
(
vma
&&
vma
->
vm_mm
)
?
follow_page
(
vma
->
vm_mm
,
vma
->
vm_start
+
i
*
PAGE_SIZE
,
0
)
:
NULL
;
dump_one_vdso_page
(
pg
,
upg
);
}
}
}
#endif
/* DEBUG */
/*
* Keep a dummy vma_close for now, it will prevent VMA merging.
*/
static
void
vdso_vma_close
(
struct
vm_area_struct
*
vma
)
{
}
/*
* Our nopage() function, maps in the actual vDSO kernel pages, they will
* be mapped read-only by do_no_page(), and eventually COW'ed, either
* right away for an initial write access, or by do_wp_page().
*/
static
struct
page
*
vdso_vma_nopage
(
struct
vm_area_struct
*
vma
,
unsigned
long
address
,
int
*
type
)
{
unsigned
long
offset
=
address
-
vma
->
vm_start
;
struct
page
*
pg
;
void
*
vbase
=
test_thread_flag
(
TIF_32BIT
)
?
vdso32_kbase
:
vdso64_kbase
;
DBG
(
"vdso_vma_nopage(current: %s, address: %016lx, off: %lx)
\n
"
,
current
->
comm
,
address
,
offset
);
if
(
address
<
vma
->
vm_start
||
address
>
vma
->
vm_end
)
return
NOPAGE_SIGBUS
;
/*
* Last page is systemcfg.
*/
if
((
vma
->
vm_end
-
address
)
<=
PAGE_SIZE
)
pg
=
virt_to_page
(
_systemcfg
);
else
pg
=
virt_to_page
(
vbase
+
offset
);
get_page
(
pg
);
DBG
(
" ->page count: %d
\n
"
,
page_count
(
pg
));
return
pg
;
}
static
struct
vm_operations_struct
vdso_vmops
=
{
.
close
=
vdso_vma_close
,
.
nopage
=
vdso_vma_nopage
,
};
/*
* This is called from binfmt_elf, we create the special vma for the
* vDSO and insert it into the mm struct tree
*/
int
arch_setup_additional_pages
(
struct
linux_binprm
*
bprm
,
int
executable_stack
)
{
struct
mm_struct
*
mm
=
current
->
mm
;
struct
vm_area_struct
*
vma
;
unsigned
long
vdso_pages
;
unsigned
long
vdso_base
;
if
(
test_thread_flag
(
TIF_32BIT
))
{
vdso_pages
=
vdso32_pages
;
vdso_base
=
VDSO32_MBASE
;
}
else
{
vdso_pages
=
vdso64_pages
;
vdso_base
=
VDSO64_MBASE
;
}
current
->
thread
.
vdso_base
=
0
;
/* vDSO has a problem and was disabled, just don't "enable" it for the
* process
*/
if
(
vdso_pages
==
0
)
return
0
;
vma
=
kmem_cache_alloc
(
vm_area_cachep
,
SLAB_KERNEL
);
if
(
vma
==
NULL
)
return
-
ENOMEM
;
memset
(
vma
,
0
,
sizeof
(
*
vma
));
/*
* pick a base address for the vDSO in process space. We try to put it
* at vdso_base which is the "natural" base for it, but we might fail
* and end up putting it elsewhere.
*/
vdso_base
=
get_unmapped_area
(
NULL
,
vdso_base
,
vdso_pages
<<
PAGE_SHIFT
,
0
,
0
);
if
(
vdso_base
&
~
PAGE_MASK
)
{
kmem_cache_free
(
vm_area_cachep
,
vma
);
return
(
int
)
vdso_base
;
}
current
->
thread
.
vdso_base
=
vdso_base
;
vma
->
vm_mm
=
mm
;
vma
->
vm_start
=
current
->
thread
.
vdso_base
;
/*
* the VMA size is one page more than the vDSO since systemcfg
* is mapped in the last one
*/
vma
->
vm_end
=
vma
->
vm_start
+
((
vdso_pages
+
1
)
<<
PAGE_SHIFT
);
/*
* our vma flags don't have VM_WRITE so by default, the process isn't allowed
* to write those pages.
* gdb can break that with ptrace interface, and thus trigger COW on those
* pages but it's then your responsibility to never do that on the "data" page
* of the vDSO or you'll stop getting kernel updates and your nice userland
* gettimeofday will be totally dead. It's fine to use that for setting
* breakpoints in the vDSO code pages though
*/
vma
->
vm_flags
=
VM_READ
|
VM_EXEC
|
VM_MAYREAD
|
VM_MAYWRITE
|
VM_MAYEXEC
|
VM_RESERVED
;
vma
->
vm_flags
|=
mm
->
def_flags
;
vma
->
vm_page_prot
=
protection_map
[
vma
->
vm_flags
&
0x7
];
vma
->
vm_ops
=
&
vdso_vmops
;
down_write
(
&
mm
->
mmap_sem
);
if
(
insert_vm_struct
(
mm
,
vma
))
{
up_write
(
&
mm
->
mmap_sem
);
kmem_cache_free
(
vm_area_cachep
,
vma
);
return
-
ENOMEM
;
}
mm
->
total_vm
+=
(
vma
->
vm_end
-
vma
->
vm_start
)
>>
PAGE_SHIFT
;
up_write
(
&
mm
->
mmap_sem
);
return
0
;
}
static
void
*
__init
find_section32
(
Elf32_Ehdr
*
ehdr
,
const
char
*
secname
,
unsigned
long
*
size
)
{
Elf32_Shdr
*
sechdrs
;
unsigned
int
i
;
char
*
secnames
;
/* Grab section headers and strings so we can tell who is who */
sechdrs
=
(
void
*
)
ehdr
+
ehdr
->
e_shoff
;
secnames
=
(
void
*
)
ehdr
+
sechdrs
[
ehdr
->
e_shstrndx
].
sh_offset
;
/* Find the section they want */
for
(
i
=
1
;
i
<
ehdr
->
e_shnum
;
i
++
)
{
if
(
strcmp
(
secnames
+
sechdrs
[
i
].
sh_name
,
secname
)
==
0
)
{
if
(
size
)
*
size
=
sechdrs
[
i
].
sh_size
;
return
(
void
*
)
ehdr
+
sechdrs
[
i
].
sh_offset
;
}
}
*
size
=
0
;
return
NULL
;
}
static
void
*
__init
find_section64
(
Elf64_Ehdr
*
ehdr
,
const
char
*
secname
,
unsigned
long
*
size
)
{
Elf64_Shdr
*
sechdrs
;
unsigned
int
i
;
char
*
secnames
;
/* Grab section headers and strings so we can tell who is who */
sechdrs
=
(
void
*
)
ehdr
+
ehdr
->
e_shoff
;
secnames
=
(
void
*
)
ehdr
+
sechdrs
[
ehdr
->
e_shstrndx
].
sh_offset
;
/* Find the section they want */
for
(
i
=
1
;
i
<
ehdr
->
e_shnum
;
i
++
)
{
if
(
strcmp
(
secnames
+
sechdrs
[
i
].
sh_name
,
secname
)
==
0
)
{
if
(
size
)
*
size
=
sechdrs
[
i
].
sh_size
;
return
(
void
*
)
ehdr
+
sechdrs
[
i
].
sh_offset
;
}
}
if
(
size
)
*
size
=
0
;
return
NULL
;
}
static
Elf32_Sym
*
__init
find_symbol32
(
struct
lib32_elfinfo
*
lib
,
const
char
*
symname
)
{
unsigned
int
i
;
char
name
[
32
],
*
c
;
for
(
i
=
0
;
i
<
(
lib
->
dynsymsize
/
sizeof
(
Elf32_Sym
));
i
++
)
{
if
(
lib
->
dynsym
[
i
].
st_name
==
0
)
continue
;
strlcpy
(
name
,
lib
->
dynstr
+
lib
->
dynsym
[
i
].
st_name
,
32
);
c
=
strchr
(
name
,
'@'
);
if
(
c
)
*
c
=
0
;
if
(
strcmp
(
symname
,
name
)
==
0
)
return
&
lib
->
dynsym
[
i
];
}
return
NULL
;
}
static
Elf64_Sym
*
__init
find_symbol64
(
struct
lib64_elfinfo
*
lib
,
const
char
*
symname
)
{
unsigned
int
i
;
char
name
[
32
],
*
c
;
for
(
i
=
0
;
i
<
(
lib
->
dynsymsize
/
sizeof
(
Elf64_Sym
));
i
++
)
{
if
(
lib
->
dynsym
[
i
].
st_name
==
0
)
continue
;
strlcpy
(
name
,
lib
->
dynstr
+
lib
->
dynsym
[
i
].
st_name
,
32
);
c
=
strchr
(
name
,
'@'
);
if
(
c
)
*
c
=
0
;
if
(
strcmp
(
symname
,
name
)
==
0
)
return
&
lib
->
dynsym
[
i
];
}
return
NULL
;
}
/* Note that we assume the section is .text and the symbol is relative to
* the library base
*/
static
unsigned
long
__init
find_function32
(
struct
lib32_elfinfo
*
lib
,
const
char
*
symname
)
{
Elf32_Sym
*
sym
=
find_symbol32
(
lib
,
symname
);
if
(
sym
==
NULL
)
{
printk
(
KERN_WARNING
"vDSO32: function %s not found !
\n
"
,
symname
);
return
0
;
}
return
sym
->
st_value
-
VDSO32_LBASE
;
}
/* Note that we assume the section is .text and the symbol is relative to
* the library base
*/
static
unsigned
long
__init
find_function64
(
struct
lib64_elfinfo
*
lib
,
const
char
*
symname
)
{
Elf64_Sym
*
sym
=
find_symbol64
(
lib
,
symname
);
if
(
sym
==
NULL
)
{
printk
(
KERN_WARNING
"vDSO64: function %s not found !
\n
"
,
symname
);
return
0
;
}
#ifdef VDS64_HAS_DESCRIPTORS
return
*
((
u64
*
)(
vdso64_kbase
+
sym
->
st_value
-
VDSO64_LBASE
))
-
VDSO64_LBASE
;
#else
return
sym
->
st_value
-
VDSO64_LBASE
;
#endif
}
static
__init
int
vdso_do_find_sections
(
struct
lib32_elfinfo
*
v32
,
struct
lib64_elfinfo
*
v64
)
{
void
*
sect
;
/*
* Locate symbol tables & text section
*/
v32
->
dynsym
=
find_section32
(
v32
->
hdr
,
".dynsym"
,
&
v32
->
dynsymsize
);
v32
->
dynstr
=
find_section32
(
v32
->
hdr
,
".dynstr"
,
NULL
);
if
(
v32
->
dynsym
==
NULL
||
v32
->
dynstr
==
NULL
)
{
printk
(
KERN_ERR
"vDSO32: a required symbol section was not found
\n
"
);
return
-
1
;
}
sect
=
find_section32
(
v32
->
hdr
,
".text"
,
NULL
);
if
(
sect
==
NULL
)
{
printk
(
KERN_ERR
"vDSO32: the .text section was not found
\n
"
);
return
-
1
;
}
v32
->
text
=
sect
-
vdso32_kbase
;
v64
->
dynsym
=
find_section64
(
v64
->
hdr
,
".dynsym"
,
&
v64
->
dynsymsize
);
v64
->
dynstr
=
find_section64
(
v64
->
hdr
,
".dynstr"
,
NULL
);
if
(
v64
->
dynsym
==
NULL
||
v64
->
dynstr
==
NULL
)
{
printk
(
KERN_ERR
"vDSO64: a required symbol section was not found
\n
"
);
return
-
1
;
}
sect
=
find_section64
(
v64
->
hdr
,
".text"
,
NULL
);
if
(
sect
==
NULL
)
{
printk
(
KERN_ERR
"vDSO64: the .text section was not found
\n
"
);
return
-
1
;
}
v64
->
text
=
sect
-
vdso64_kbase
;
return
0
;
}
static
__init
void
vdso_setup_trampolines
(
struct
lib32_elfinfo
*
v32
,
struct
lib64_elfinfo
*
v64
)
{
/*
* Find signal trampolines
*/
vdso64_rt_sigtramp
=
find_function64
(
v64
,
"__kernel_sigtramp_rt64"
);
vdso32_sigtramp
=
find_function32
(
v32
,
"__kernel_sigtramp32"
);
vdso32_rt_sigtramp
=
find_function32
(
v32
,
"__kernel_sigtramp_rt32"
);
}
static
__init
int
vdso_fixup_datapage
(
struct
lib32_elfinfo
*
v32
,
struct
lib64_elfinfo
*
v64
)
{
Elf32_Sym
*
sym32
;
Elf64_Sym
*
sym64
;
sym32
=
find_symbol32
(
v32
,
"__kernel_datapage_offset"
);
if
(
sym32
==
NULL
)
{
printk
(
KERN_ERR
"vDSO32: Can't find symbol __kernel_datapage_offset !
\n
"
);
return
-
1
;
}
*
((
int
*
)(
vdso32_kbase
+
(
sym32
->
st_value
-
VDSO32_LBASE
)))
=
(
vdso32_pages
<<
PAGE_SHIFT
)
-
(
sym32
->
st_value
-
VDSO32_LBASE
);
sym64
=
find_symbol64
(
v64
,
"__kernel_datapage_offset"
);
if
(
sym64
==
NULL
)
{
printk
(
KERN_ERR
"vDSO64: Can't find symbol __kernel_datapage_offset !
\n
"
);
return
-
1
;
}
*
((
int
*
)(
vdso64_kbase
+
sym64
->
st_value
-
VDSO64_LBASE
))
=
(
vdso64_pages
<<
PAGE_SHIFT
)
-
(
sym64
->
st_value
-
VDSO64_LBASE
);
return
0
;
}
static
int
vdso_do_func_patch32
(
struct
lib32_elfinfo
*
v32
,
struct
lib64_elfinfo
*
v64
,
const
char
*
orig
,
const
char
*
fix
)
{
Elf32_Sym
*
sym32_gen
,
*
sym32_fix
;
sym32_gen
=
find_symbol32
(
v32
,
orig
);
if
(
sym32_gen
==
NULL
)
{
printk
(
KERN_ERR
"vDSO32: Can't find symbol %s !
\n
"
,
orig
);
return
-
1
;
}
sym32_fix
=
find_symbol32
(
v32
,
fix
);
if
(
sym32_fix
==
NULL
)
{
printk
(
KERN_ERR
"vDSO32: Can't find symbol %s !
\n
"
,
fix
);
return
-
1
;
}
sym32_gen
->
st_value
=
sym32_fix
->
st_value
;
sym32_gen
->
st_size
=
sym32_fix
->
st_size
;
sym32_gen
->
st_info
=
sym32_fix
->
st_info
;
sym32_gen
->
st_other
=
sym32_fix
->
st_other
;
sym32_gen
->
st_shndx
=
sym32_fix
->
st_shndx
;
return
0
;
}
static
int
vdso_do_func_patch64
(
struct
lib32_elfinfo
*
v32
,
struct
lib64_elfinfo
*
v64
,
const
char
*
orig
,
const
char
*
fix
)
{
Elf64_Sym
*
sym64_gen
,
*
sym64_fix
;
sym64_gen
=
find_symbol64
(
v64
,
orig
);
if
(
sym64_gen
==
NULL
)
{
printk
(
KERN_ERR
"vDSO64: Can't find symbol %s !
\n
"
,
orig
);
return
-
1
;
}
sym64_fix
=
find_symbol64
(
v64
,
fix
);
if
(
sym64_fix
==
NULL
)
{
printk
(
KERN_ERR
"vDSO64: Can't find symbol %s !
\n
"
,
fix
);
return
-
1
;
}
sym64_gen
->
st_value
=
sym64_fix
->
st_value
;
sym64_gen
->
st_size
=
sym64_fix
->
st_size
;
sym64_gen
->
st_info
=
sym64_fix
->
st_info
;
sym64_gen
->
st_other
=
sym64_fix
->
st_other
;
sym64_gen
->
st_shndx
=
sym64_fix
->
st_shndx
;
return
0
;
}
static
__init
int
vdso_fixup_alt_funcs
(
struct
lib32_elfinfo
*
v32
,
struct
lib64_elfinfo
*
v64
)
{
u32
pvr
;
int
i
;
pvr
=
mfspr
(
SPRN_PVR
);
for
(
i
=
0
;
i
<
ARRAY_SIZE
(
vdso_patches
);
i
++
)
{
struct
vdso_patch_def
*
patch
=
&
vdso_patches
[
i
];
int
match
=
(
pvr
&
patch
->
pvr_mask
)
==
patch
->
pvr_value
;
DBG
(
"patch %d (mask: %x, pvr: %x) : %s
\n
"
,
i
,
patch
->
pvr_mask
,
patch
->
pvr_value
,
match
?
"match"
:
"skip"
);
if
(
!
match
)
continue
;
DBG
(
"replacing %s with %s...
\n
"
,
patch
->
gen_name
,
patch
->
fix_name
);
/*
* Patch the 32 bits and 64 bits symbols. Note that we do not patch
* the "." symbol on 64 bits. It would be easy to do, but doesn't
* seem to be necessary, patching the OPD symbol is enough.
*/
vdso_do_func_patch32
(
v32
,
v64
,
patch
->
gen_name
,
patch
->
fix_name
);
vdso_do_func_patch64
(
v32
,
v64
,
patch
->
gen_name
,
patch
->
fix_name
);
}
return
0
;
}
static
__init
int
vdso_setup
(
void
)
{
struct
lib32_elfinfo
v32
;
struct
lib64_elfinfo
v64
;
v32
.
hdr
=
vdso32_kbase
;
v64
.
hdr
=
vdso64_kbase
;
if
(
vdso_do_find_sections
(
&
v32
,
&
v64
))
return
-
1
;
if
(
vdso_fixup_datapage
(
&
v32
,
&
v64
))
return
-
1
;
if
(
vdso_fixup_alt_funcs
(
&
v32
,
&
v64
))
return
-
1
;
vdso_setup_trampolines
(
&
v32
,
&
v64
);
return
0
;
}
void
__init
vdso_init
(
void
)
{
int
i
;
vdso64_pages
=
(
&
vdso64_end
-
&
vdso64_start
)
>>
PAGE_SHIFT
;
vdso32_pages
=
(
&
vdso32_end
-
&
vdso32_start
)
>>
PAGE_SHIFT
;
DBG
(
"vdso64_kbase: %p, 0x%x pages, vdso32_kbase: %p, 0x%x pages
\n
"
,
vdso64_kbase
,
vdso64_pages
,
vdso32_kbase
,
vdso32_pages
);
/*
* Initialize the vDSO images in memory, that is do necessary
* fixups of vDSO symbols, locate trampolines, etc...
*/
if
(
vdso_setup
())
{
printk
(
KERN_ERR
"vDSO setup failure, not enabled !
\n
"
);
/* XXX should free pages here ? */
vdso64_pages
=
vdso32_pages
=
0
;
return
;
}
/* Make sure pages are in the correct state */
for
(
i
=
0
;
i
<
vdso64_pages
;
i
++
)
{
struct
page
*
pg
=
virt_to_page
(
vdso64_kbase
+
i
*
PAGE_SIZE
);
ClearPageReserved
(
pg
);
get_page
(
pg
);
}
for
(
i
=
0
;
i
<
vdso32_pages
;
i
++
)
{
struct
page
*
pg
=
virt_to_page
(
vdso32_kbase
+
i
*
PAGE_SIZE
);
ClearPageReserved
(
pg
);
get_page
(
pg
);
}
get_page
(
virt_to_page
(
_systemcfg
));
}
int
in_gate_area_no_task
(
unsigned
long
addr
)
{
return
0
;
}
int
in_gate_area
(
struct
task_struct
*
task
,
unsigned
long
addr
)
{
return
0
;
}
struct
vm_area_struct
*
get_gate_vma
(
struct
task_struct
*
tsk
)
{
return
NULL
;
}
arch/ppc64/kernel/vmlinux.lds.S  deleted 100644 → 0
#include <asm/page.h>
#include <asm-generic/vmlinux.lds.h>

OUTPUT_ARCH(powerpc:common64)
jiffies = jiffies_64;
SECTIONS
{
  /* Sections to be discarded. */
  /DISCARD/ : { *(.exitcall.exit) }

  /* Read-only sections, merged into text segment: */
  .text : {
	*(.text .text.*)
	SCHED_TEXT
	LOCK_TEXT
	KPROBES_TEXT
	*(.fixup)
	. = ALIGN(PAGE_SIZE);
	_etext = .;
  }

  __ex_table : { __start___ex_table = .;  *(__ex_table)  __stop___ex_table = .; }

  __bug_table : { __start___bug_table = .;  *(__bug_table)  __stop___bug_table = .; }

  __ftr_fixup : { __start___ftr_fixup = .;  *(__ftr_fixup)  __stop___ftr_fixup = .; }

  RODATA

  /* will be freed after init */
  . = ALIGN(PAGE_SIZE);
  __init_begin = .;

  .init.text : { _sinittext = .;  *(.init.text)  _einittext = .; }

  .init.data : { *(.init.data) }

  . = ALIGN(16);
  .init.setup : { __setup_start = .;  *(.init.setup)  __setup_end = .; }

  .initcall.init : {
	__initcall_start = .;
	*(.initcall1.init)
	*(.initcall2.init)
	*(.initcall3.init)
	*(.initcall4.init)
	*(.initcall5.init)
	*(.initcall6.init)
	*(.initcall7.init)
	__initcall_end = .;
  }

  .con_initcall.init : { __con_initcall_start = .;  *(.con_initcall.init)  __con_initcall_end = .; }

  SECURITY_INIT

  . = ALIGN(PAGE_SIZE);
  .init.ramfs : { __initramfs_start = .;  *(.init.ramfs)  __initramfs_end = .; }

  .data.percpu : { __per_cpu_start = .;  *(.data.percpu)  __per_cpu_end = .; }

  . = ALIGN(PAGE_SIZE);
  . = ALIGN(16384);
  __init_end = .;
  /* freed after init ends here */

  /* Read/write sections */
  . = ALIGN(PAGE_SIZE);
  . = ALIGN(16384);
  _sdata = .;

  /* The initial task and kernel stack */
  .data.init_task : { *(.data.init_task) }

  . = ALIGN(PAGE_SIZE);
  .data.page_aligned : { *(.data.page_aligned) }

  .data.cacheline_aligned : { *(.data.cacheline_aligned) }

  .data : {
	*(.data .data.rel* .toc1)
	*(.branch_lt)
  }

  .opd : { *(.opd) }

  .got : {
	__toc_start = .;
	*(.got)
	*(.toc)
	. = ALIGN(PAGE_SIZE);
	_edata = .;
  }

  . = ALIGN(PAGE_SIZE);
  .bss : { __bss_start = .;  *(.bss)  __bss_stop = .; }

  . = ALIGN(PAGE_SIZE);
  _end = . ;
}
arch/ppc64/xmon/privinst.h  deleted 100644 → 0
/*
 * Copyright (C) 1996 Paul Mackerras.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#define GETREG(reg)		\
	static inline unsigned long get_ ## reg (void)	\
	{ unsigned long ret; asm volatile ("mf" #reg " %0" : "=r" (ret) :); return ret; }

#define SETREG(reg)		\
	static inline void set_ ## reg (unsigned long val)	\
	{ asm volatile ("mt" #reg " %0" : : "r" (val)); }

GETREG(msr)
SETREG(msrd)
GETREG(cr)

#define GSETSPR(n, name)	\
	static inline long get_ ## name (void) \
	{ long ret; asm volatile ("mfspr %0," #n : "=r" (ret) : ); return ret; } \
	static inline void set_ ## name (long val) \
	{ asm volatile ("mtspr " #n ",%0" : : "r" (val)); }

GSETSPR(0, mq)
GSETSPR(1, xer)
GSETSPR(4, rtcu)
GSETSPR(5, rtcl)
GSETSPR(8, lr)
GSETSPR(9, ctr)
GSETSPR(18, dsisr)
GSETSPR(19, dar)
GSETSPR(22, dec)
GSETSPR(25, sdr1)
GSETSPR(26, srr0)
GSETSPR(27, srr1)
GSETSPR(272, sprg0)
GSETSPR(273, sprg1)
GSETSPR(274, sprg2)
GSETSPR(275, sprg3)
GSETSPR(282, ear)
GSETSPR(287, pvr)
GSETSPR(1008, hid0)
GSETSPR(1009, hid1)
GSETSPR(1010, iabr)
GSETSPR(1023, pir)

static inline void store_inst(void *p)
{
	asm volatile ("dcbst 0,%0; sync; icbi 0,%0; isync" : : "r" (p));
}

static inline void cflush(void *p)
{
	asm volatile ("dcbf 0,%0; icbi 0,%0" : : "r" (p));
}

static inline void cinval(void *p)
{
	asm volatile ("dcbi 0,%0; icbi 0,%0" : : "r" (p));
}
drivers/char/Kconfig
...
@@ -735,7 +735,7 @@ config SGI_IP27_RTC
 config GEN_RTC
 	tristate "Generic /dev/rtc emulation"
-	depends on RTC!=y && !IA64 && !ARM && !PPC64 && !M32R && !SPARC32 && !SPARC64
+	depends on RTC!=y && !IA64 && !ARM && !M32R && !SPARC32 && !SPARC64
 	---help---
 	  If you say Y here and create a character special file /dev/rtc with
 	  major number 10 and minor number 135 using mknod ("man mknod"), you
...
include/asm-ppc64/btext.h → include/asm-powerpc/btext.h  (file moved)
include/asm-ppc64/delay.h → include/asm-powerpc/delay.h
#ifndef _
PPC64
_DELAY_H
#ifndef _
ASM_POWERPC
_DELAY_H
#define _
PPC64
_DELAY_H
#define _
ASM_POWERPC
_DELAY_H
/*
/*
* Copyright 1996, Paul Mackerras.
* Copyright 1996, Paul Mackerras.
...
@@ -15,10 +15,17 @@
...
@@ -15,10 +15,17 @@
extern
unsigned
long
tb_ticks_per_usec
;
extern
unsigned
long
tb_ticks_per_usec
;
#ifdef CONFIG_PPC64
/* define these here to prevent circular dependencies */
/* define these here to prevent circular dependencies */
/* these instructions control the thread priority on multi-threaded cpus */
#define __HMT_low() asm volatile("or 1,1,1")
#define __HMT_low() asm volatile("or 1,1,1")
#define __HMT_medium() asm volatile("or 2,2,2")
#define __HMT_medium() asm volatile("or 2,2,2")
#define __barrier() asm volatile("":::"memory")
#else
#define __HMT_low()
#define __HMT_medium()
#endif
#define __barrier() asm volatile("" ::: "memory")
static
inline
unsigned
long
__get_tb
(
void
)
static
inline
unsigned
long
__get_tb
(
void
)
{
{
...
@@ -32,7 +39,7 @@ static inline void __delay(unsigned long loops)
...
@@ -32,7 +39,7 @@ static inline void __delay(unsigned long loops)
{
{
unsigned
long
start
=
__get_tb
();
unsigned
long
start
=
__get_tb
();
while
((
__get_tb
()
-
start
)
<
loops
)
while
((
__get_tb
()
-
start
)
<
loops
)
__HMT_low
();
__HMT_low
();
__HMT_medium
();
__HMT_medium
();
__barrier
();
__barrier
();
...
@@ -45,4 +52,4 @@ static inline void udelay(unsigned long usecs)
...
@@ -45,4 +52,4 @@ static inline void udelay(unsigned long usecs)
__delay
(
loops
);
__delay
(
loops
);
}
}
#endif
/* _
PPC64
_DELAY_H */
#endif
/* _
ASM_POWERPC
_DELAY_H */
include/asm-ppc64/eeh.h → include/asm-powerpc/eeh.h  (file moved)
include/asm-ppc64/floppy.h → include/asm-powerpc/floppy.h
...
@@ -7,8 +7,8 @@
...
@@ -7,8 +7,8 @@
*
*
* Copyright (C) 1995
* Copyright (C) 1995
*/
*/
#ifndef __ASM_P
PC64
_FLOPPY_H
#ifndef __ASM_P
OWERPC
_FLOPPY_H
#define __ASM_P
PC64
_FLOPPY_H
#define __ASM_P
OWERPC
_FLOPPY_H
#include <linux/config.h>
#include <linux/config.h>
#include <asm/machdep.h>
#include <asm/machdep.h>
...
@@ -18,11 +18,11 @@
...
@@ -18,11 +18,11 @@
#define fd_enable_dma() enable_dma(FLOPPY_DMA)
#define fd_enable_dma() enable_dma(FLOPPY_DMA)
#define fd_disable_dma() disable_dma(FLOPPY_DMA)
#define fd_disable_dma() disable_dma(FLOPPY_DMA)
#define fd_request_dma() request_dma(FLOPPY_DMA,"floppy")
#define fd_request_dma() request_dma(FLOPPY_DMA,
"floppy")
#define fd_free_dma() free_dma(FLOPPY_DMA)
#define fd_free_dma() free_dma(FLOPPY_DMA)
#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
#define fd_clear_dma_ff() clear_dma_ff(FLOPPY_DMA)
#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,mode)
#define fd_set_dma_mode(mode) set_dma_mode(FLOPPY_DMA,
mode)
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,count)
#define fd_set_dma_count(count) set_dma_count(FLOPPY_DMA,
count)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_enable_irq() enable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_disable_irq() disable_irq(FLOPPY_IRQ)
#define fd_cacheflush(addr,size)
/* nothing */
#define fd_cacheflush(addr,size)
/* nothing */
...
@@ -35,10 +35,10 @@
...
@@ -35,10 +35,10 @@
#include <linux/pci.h>
#include <linux/pci.h>
#define fd_dma_setup(addr,size,mode,io) p
pc64
_fd_dma_setup(addr,size,mode,io)
#define fd_dma_setup(addr,size,mode,io) p
owerpc
_fd_dma_setup(addr,size,mode,io)
static
__inline__
int
static
__inline__
int
powerpc_fd_dma_setup
(
char
*
addr
,
unsigned
long
size
,
ppc64_fd_dma_setup
(
char
*
addr
,
unsigned
long
size
,
int
mode
,
int
io
)
int
mode
,
int
io
)
{
{
static
unsigned
long
prev_size
;
static
unsigned
long
prev_size
;
static
dma_addr_t
bus_addr
=
0
;
static
dma_addr_t
bus_addr
=
0
;
...
@@ -55,9 +55,8 @@ ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
...
@@ -55,9 +55,8 @@ ppc64_fd_dma_setup(char *addr, unsigned long size, int mode, int io)
bus_addr
=
0
;
bus_addr
=
0
;
}
}
if
(
!
bus_addr
)
/* need to map it */
{
if
(
!
bus_addr
)
/* need to map it */
bus_addr
=
pci_map_single
(
NULL
,
addr
,
size
,
dir
);
bus_addr
=
pci_map_single
(
NULL
,
addr
,
size
,
dir
);
}
/* remember this one as prev */
/* remember this one as prev */
prev_addr
=
addr
;
prev_addr
=
addr
;
...
@@ -103,4 +102,4 @@ static int FDC2 = -1;
...
@@ -103,4 +102,4 @@ static int FDC2 = -1;
#define EXTRA_FLOPPY_PARAMS
#define EXTRA_FLOPPY_PARAMS
#endif
/* __ASM_P
PC64
_FLOPPY_H */
#endif
/* __ASM_P
OWERPC
_FLOPPY_H */
include/asm-ppc64/hvconsole.h → include/asm-powerpc/hvconsole.h  (file moved)
include/asm-ppc64/hvcserver.h → include/asm-powerpc/hvcserver.h  (file moved)
include/asm-powerpc/kexec.h
...
@@ -40,6 +40,7 @@ extern note_buf_t crash_notes[];
 #ifdef __powerpc64__
 extern void kexec_smp_wait(void);	/* get and clear naca physid, wait for
					   master to copy new code to 0 */
+extern void __init kexec_setup(void);
 #else
 struct kimage;
 extern void machine_kexec_simple(struct kimage *image);
...
include/asm-powerpc/machdep.h
...
@@ -93,7 +93,9 @@ struct machdep_calls {
 	void		(*init_IRQ)(void);
 	int		(*get_irq)(struct pt_regs *);
-	void		(*cpu_irq_down)(int secondary);
+#ifdef CONFIG_KEXEC
+	void		(*kexec_cpu_down)(int crash_shutdown, int secondary);
+#endif

 	/* PCI stuff */
 	/* Called after scanning the bus, before allocating resources */
...
include/asm-ppc64/nvram.h → include/asm-powerpc/nvram.h
/*
/*
* PreP compliant NVRAM access
* NVRAM definitions and access functions.
* This needs to be updated for PPC64
*
*
* This program is free software; you can redistribute it and/or
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* modify it under the terms of the GNU General Public License
...
@@ -8,8 +7,8 @@
...
@@ -8,8 +7,8 @@
* 2 of the License, or (at your option) any later version.
* 2 of the License, or (at your option) any later version.
*/
*/
#ifndef _
PPC64
_NVRAM_H
#ifndef _
ASM_POWERPC
_NVRAM_H
#define _
PPC64
_NVRAM_H
#define _
ASM_POWERPC
_NVRAM_H
#define NVRW_CNT 0x20
#define NVRW_CNT 0x20
#define NVRAM_HEADER_LEN 16
/* sizeof(struct nvram_header) */
#define NVRAM_HEADER_LEN 16
/* sizeof(struct nvram_header) */
...
@@ -69,7 +68,6 @@ extern int nvram_clear_error_log(void);
...
@@ -69,7 +68,6 @@ extern int nvram_clear_error_log(void);
extern
struct
nvram_partition
*
nvram_find_partition
(
int
sig
,
const
char
*
name
);
extern
struct
nvram_partition
*
nvram_find_partition
(
int
sig
,
const
char
*
name
);
extern
int
pSeries_nvram_init
(
void
);
extern
int
pSeries_nvram_init
(
void
);
extern
int
pmac_nvram_init
(
void
);
extern
int
mmio_nvram_init
(
void
);
extern
int
mmio_nvram_init
(
void
);
/* PowerMac specific nvram stuffs */
/* PowerMac specific nvram stuffs */
...
@@ -88,7 +86,11 @@ extern u8 pmac_xpram_read(int xpaddr);
...
@@ -88,7 +86,11 @@ extern u8 pmac_xpram_read(int xpaddr);
extern
void
pmac_xpram_write
(
int
xpaddr
,
u8
data
);
extern
void
pmac_xpram_write
(
int
xpaddr
,
u8
data
);
/* Synchronize NVRAM */
/* Synchronize NVRAM */
extern
int
nvram_sync
(
void
);
extern
void
nvram_sync
(
void
);
/* Normal access to NVRAM */
extern
unsigned
char
nvram_read_byte
(
int
i
);
extern
void
nvram_write_byte
(
unsigned
char
c
,
int
i
);
/* Some offsets in XPRAM */
/* Some offsets in XPRAM */
#define PMAC_XPRAM_MACHINE_LOC 0xe4
#define PMAC_XPRAM_MACHINE_LOC 0xe4
...
@@ -112,5 +114,6 @@ struct pmac_machine_location {
...
@@ -112,5 +114,6 @@ struct pmac_machine_location {
_IOWR('p', 0x40, int)
_IOWR('p', 0x40, int)
#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int)
/* Get NVRAM partition offset */
#define IOC_NVRAM_GET_OFFSET _IOWR('p', 0x42, int)
/* Get NVRAM partition offset */
#define IOC_NVRAM_SYNC _IO('p', 0x43)
/* Sync NVRAM image */
#endif
/* _
PPC64
_NVRAM_H */
#endif
/* _
ASM_POWERPC
_NVRAM_H */
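The header keeps the /dev/nvram ioctl numbers: IOC_NVRAM_GET_OFFSET looks up a partition offset and the newly added IOC_NVRAM_SYNC flushes the NVRAM image. A hedged userspace sketch of calling them; it assumes a kernel that exposes /dev/nvram with these ioctls, and the PowerMac driver convention of passing a partition id in and getting the offset back in the same int:

	#include <fcntl.h>
	#include <stdio.h>
	#include <sys/ioctl.h>
	#include <unistd.h>

	/* Same encodings as in include/asm-powerpc/nvram.h */
	#define IOC_NVRAM_GET_OFFSET	_IOWR('p', 0x42, int)	/* Get NVRAM partition offset */
	#define IOC_NVRAM_SYNC		_IO('p', 0x43)		/* Sync NVRAM image */

	int main(void)
	{
		int fd = open("/dev/nvram", O_RDWR);
		int part = 0;	/* assumed convention: partition id in, offset out */

		if (fd < 0) {
			perror("open /dev/nvram");
			return 1;
		}
		if (ioctl(fd, IOC_NVRAM_GET_OFFSET, &part) == 0)
			printf("partition offset: %d\n", part);
		if (ioctl(fd, IOC_NVRAM_SYNC) != 0)
			perror("IOC_NVRAM_SYNC");
		close(fd);
		return 0;
	}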
include/asm-powerpc/page.h
0 → 100644
#ifndef _ASM_POWERPC_PAGE_H
#define _ASM_POWERPC_PAGE_H
/*
* Copyright (C) 2001,2005 IBM Corporation.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#ifdef __KERNEL__
#include <linux/config.h>
#include <asm/asm-compat.h>
/*
* On PPC32 page size is 4K. For PPC64 we support either 4K or 64K software
* page size. When using 64K pages however, whether we are really supporting
* 64K pages in HW or not is irrelevant to those definitions.
*/
#ifdef CONFIG_PPC_64K_PAGES
#define PAGE_SHIFT 16
#else
#define PAGE_SHIFT 12
#endif
#define PAGE_SIZE (ASM_CONST(1) << PAGE_SHIFT)
/* We do define AT_SYSINFO_EHDR but don't use the gate mechanism */
#define __HAVE_ARCH_GATE_AREA 1
/*
* Subtle: (1 << PAGE_SHIFT) is an int, not an unsigned long. So if we
* assign PAGE_MASK to a larger type it gets extended the way we want
* (i.e. with 1s in the high bits)
*/
#define PAGE_MASK (~((1 << PAGE_SHIFT) - 1))
#define PAGE_OFFSET ASM_CONST(CONFIG_KERNEL_START)
#define KERNELBASE PAGE_OFFSET
#ifdef CONFIG_DISCONTIGMEM
#define page_to_pfn(page) discontigmem_page_to_pfn(page)
#define pfn_to_page(pfn) discontigmem_pfn_to_page(pfn)
#define pfn_valid(pfn) discontigmem_pfn_valid(pfn)
#endif
#ifdef CONFIG_FLATMEM
#define pfn_to_page(pfn) (mem_map + (pfn))
#define page_to_pfn(page) ((unsigned long)((page) - mem_map))
#define pfn_valid(pfn) ((pfn) < max_mapnr)
#endif
#define virt_to_page(kaddr) pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
#define pfn_to_kaddr(pfn) __va((pfn) << PAGE_SHIFT)
#define virt_addr_valid(kaddr) pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
#define __pa(x) ((unsigned long)(x) - PAGE_OFFSET)
/*
* Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
* and needs to be executable. This means the whole heap ends
* up being executable.
*/
#define VM_DATA_DEFAULT_FLAGS32 (VM_READ | VM_WRITE | VM_EXEC | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define VM_DATA_DEFAULT_FLAGS64 (VM_READ | VM_WRITE | \
VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#ifdef __powerpc64__
#include <asm/page_64.h>
#else
#include <asm/page_32.h>
#endif
/* align addr on a size boundary - adjust address up/down if needed */
#define _ALIGN_UP(addr,size) (((addr)+((size)-1))&(~((size)-1)))
#define _ALIGN_DOWN(addr,size) ((addr)&(~((size)-1)))
/* align addr on a size boundary - adjust address up if needed */
#define _ALIGN(addr,size) _ALIGN_UP(addr,size)
/* to align the pointer to the (next) page boundary */
#define PAGE_ALIGN(addr) _ALIGN(addr, PAGE_SIZE)
#ifndef __ASSEMBLY__

#undef STRICT_MM_TYPECHECKS

#ifdef STRICT_MM_TYPECHECKS
/* These are used to make use of C type-checking. */

/* PTE level */
typedef struct { pte_basic_t pte; } pte_t;
#define pte_val(x)	((x).pte)
#define __pte(x)	((pte_t) { (x) })

/* 64k pages additionally define a bigger "real PTE" type that gathers
 * the "second half" part of the PTE for pseudo 64k pages
 */
#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef struct { pte_t pte; } real_pte_t;
#endif

/* PMD level */
typedef struct { unsigned long pmd; } pmd_t;
#define pmd_val(x)	((x).pmd)
#define __pmd(x)	((pmd_t) { (x) })

/* PUD level exusts only on 4k pages */
#ifndef CONFIG_PPC_64K_PAGES
typedef struct { unsigned long pud; } pud_t;
#define pud_val(x)	((x).pud)
#define __pud(x)	((pud_t) { (x) })
#endif

/* PGD level */
typedef struct { unsigned long pgd; } pgd_t;
#define pgd_val(x)	((x).pgd)
#define __pgd(x)	((pgd_t) { (x) })

/* Page protection bits */
typedef struct { unsigned long pgprot; } pgprot_t;
#define pgprot_val(x)	((x).pgprot)
#define __pgprot(x)	((pgprot_t) { (x) })

#else

/*
 * .. while these make it easier on the compiler
 */

typedef pte_basic_t pte_t;
#define pte_val(x)	(x)
#define __pte(x)	(x)

#ifdef CONFIG_PPC_64K_PAGES
typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
#else
typedef unsigned long real_pte_t;
#endif

typedef unsigned long pmd_t;
#define pmd_val(x)	(x)
#define __pmd(x)	(x)

#ifndef CONFIG_PPC_64K_PAGES
typedef unsigned long pud_t;
#define pud_val(x)	(x)
#define __pud(x)	(x)
#endif

typedef unsigned long pgd_t;
#define pgd_val(x)	(x)
#define pgprot_val(x)	(x)

typedef unsigned long pgprot_t;
#define __pgd(x)	(x)
#define __pgprot(x)	(x)

#endif

struct page;
extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
extern void copy_user_page(void *to, void *from, unsigned long vaddr,
		struct page *p);
extern int page_is_ram(unsigned long pfn);

#endif /* __ASSEMBLY__ */

#endif /* __KERNEL__ */

#endif /* _ASM_POWERPC_PAGE_H */
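The comment in page.h about (1 << PAGE_SHIFT) being an int is exactly why PAGE_MASK is written as ~((1 << PAGE_SHIFT) - 1): when that int mask is assigned to a wider type it sign-extends with 1s in the high bits. A small userspace check of the property on a 64-bit host (PAGE_SHIFT of 12, the 4K case, assumed):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	/* Same form as in asm-powerpc/page.h: an int, so the high bits become
	 * all 1s when it is widened (sign extension). */
	#define PAGE_MASK	(~((1 << PAGE_SHIFT) - 1))

	int main(void)
	{
		unsigned long good = PAGE_MASK;			/* 0xfffffffffffff000 */
		unsigned long bad  = ~((1u << PAGE_SHIFT) - 1);	/* 0x00000000fffff000 */

		printf("int mask, widened:        %#lx\n", good);
		printf("unsigned mask, widened:   %#lx\n", bad);
		printf("page base of 0x123456789: %#lx\n", 0x123456789UL & good);
		return 0;
	}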
include/asm-powerpc/page_32.h
0 → 100644
#ifndef _ASM_POWERPC_PAGE_32_H
#define _ASM_POWERPC_PAGE_32_H
#define VM_DATA_DEFAULT_FLAGS VM_DATA_DEFAULT_FLAGS32
#define PPC_MEMSTART 0
#ifndef __ASSEMBLY__
/*
* The basic type of a PTE - 64 bits for those CPUs with > 32 bit
* physical addressing. For now this just the IBM PPC440.
*/
#ifdef CONFIG_PTE_64BIT
typedef unsigned long long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 3)	/* 512 ptes per page */
#define PTE_FMT		"%16Lx"
#else
typedef unsigned long pte_basic_t;
#define PTE_SHIFT	(PAGE_SHIFT - 2)	/* 1024 ptes per page */
#define PTE_FMT		"%.8lx"
#endif

struct page;
extern void clear_pages(void *page, int order);
static inline void clear_page(void *page) { clear_pages(page, 0); }
extern void copy_page(void *to, void *from);

/* Pure 2^n version of get_order */
extern __inline__ int get_order(unsigned long size)
{
	int lz;

	size = (size-1) >> PAGE_SHIFT;
	asm ("cntlzw %0,%1" : "=r" (lz) : "r" (size));
	return 32 - lz;
}

#endif /* __ASSEMBLY__ */

#endif /* _ASM_POWERPC_PAGE_32_H */
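get_order() above counts how many page-size doublings a request needs by counting leading zeros with cntlzw. A portable userspace equivalent, with __builtin_clz() standing in for the instruction, gives the same results for a few sample sizes:

	#include <stdio.h>

	#define PAGE_SHIFT	12	/* 4K pages, as on ppc32 */

	/* Same computation as asm-powerpc/page_32.h; __builtin_clz(0) is
	 * undefined in C, whereas cntlzw of 0 is 32, so guard that case. */
	static int get_order(unsigned long size)
	{
		size = (size - 1) >> PAGE_SHIFT;
		return size ? 32 - __builtin_clz((unsigned int)size) : 0;
	}

	int main(void)
	{
		unsigned long sizes[] = { 1, 4096, 4097, 8192, 65536 };
		unsigned int i;

		for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++)
			printf("get_order(%lu) = %d\n", sizes[i], get_order(sizes[i]));
		return 0;	/* prints orders 0, 0, 1, 1, 4 */
	}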
include/asm-ppc64/page.h → include/asm-powerpc/page_64.h
-#ifndef _PPC64_PAGE_H
-#define _PPC64_PAGE_H
+#ifndef _ASM_POWERPC_PAGE_64_H
+#define _ASM_POWERPC_PAGE_64_H
 /*
  * Copyright (C) 2001 PPC64 Team, IBM Corp
...
@@ -10,54 +10,91 @@
  * 2 of the License, or (at your option) any later version.
  */
 
-#include <linux/config.h>
-#include <asm/asm-compat.h>
-
 /*
- * We support either 4k or 64k software page size. When using 64k pages
- * however, wether we are really supporting 64k pages in HW or not is
- * irrelevant to those definitions. We always define HW_PAGE_SHIFT to 12
- * as use of 64k pages remains a linux kernel specific, every notion of
- * page number shared with the firmware, TCEs, iommu, etc... still assumes
- * a page size of 4096.
+ * We always define HW_PAGE_SHIFT to 12 as use of 64K pages remains Linux
+ * specific, every notion of page number shared with the firmware, TCEs,
+ * iommu, etc... still uses a page size of 4K.
  */
-#ifdef CONFIG_PPC_64K_PAGES
-#define PAGE_SHIFT		16
-#else
-#define PAGE_SHIFT		12
-#endif
-
-#define PAGE_SIZE		(ASM_CONST(1) << PAGE_SHIFT)
-#define PAGE_MASK		(~(PAGE_SIZE-1))
-
+/* HW_PAGE_SHIFT is always 4k pages */
 #define HW_PAGE_SHIFT		12
 #define HW_PAGE_SIZE		(ASM_CONST(1) << HW_PAGE_SHIFT)
 #define HW_PAGE_MASK		(~(HW_PAGE_SIZE-1))
 
-/* PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
- * HW_PAGE_SHIFT, that is 4k pages
+/*
+ * PAGE_FACTOR is the number of bits factor between PAGE_SHIFT and
+ * HW_PAGE_SHIFT, that is 4K pages.
  */
 #define PAGE_FACTOR		(PAGE_SHIFT - HW_PAGE_SHIFT)
 
+#define REGION_SIZE		4UL
+#define REGION_SHIFT		60UL
+#define REGION_MASK		(((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
+
+#define VMALLOCBASE		ASM_CONST(0xD000000000000000)
+#define VMALLOC_REGION_ID	(VMALLOCBASE >> REGION_SHIFT)
+#define KERNEL_REGION_ID	(KERNELBASE >> REGION_SHIFT)
+#define USER_REGION_ID		(0UL)
+#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
+
 /* Segment size */
 #define SID_SHIFT		28
 #define SID_MASK		0xfffffffffUL
 #define ESID_MASK		0xfffffffff0000000UL
 #define GET_ESID(x)		(((x) >> SID_SHIFT) & SID_MASK)
 
-/* Large pages size */
-
 #ifndef __ASSEMBLY__
+#include <asm/cache.h>
+
+typedef unsigned long pte_basic_t;
+
+static __inline__ void clear_page(void *addr)
+{
+	unsigned long lines, line_size;
+
+	line_size = ppc64_caches.dline_size;
+	lines = ppc64_caches.dlines_per_page;
+
+	__asm__ __volatile__(
+	"mtctr	%1	# clear_page\n\
+1:	dcbz	0,%0\n\
+	add	%0,%0,%3\n\
+	bdnz+	1b"
+	: "=r" (addr)
+	: "r" (lines), "0" (addr), "r" (line_size)
+	: "ctr", "memory");
+}
+
+extern void copy_4K_page(void *to, void *from);
+
+#ifdef CONFIG_PPC_64K_PAGES
+static inline void copy_page(void *to, void *from)
+{
+	unsigned int i;
+	for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
+		copy_4K_page(to, from);
+		to += 4096;
+		from += 4096;
+	}
+}
+#else /* CONFIG_PPC_64K_PAGES */
+static inline void copy_page(void *to, void *from)
+{
+	copy_4K_page(to, from);
+}
+#endif /* CONFIG_PPC_64K_PAGES */
+
+/* Log 2 of page table size */
+extern u64 ppc64_pft_size;
+
+/* Large pages size */
 extern unsigned int HPAGE_SHIFT;
 #define HPAGE_SIZE		((1UL) << HPAGE_SHIFT)
 #define HPAGE_MASK		(~(HPAGE_SIZE - 1))
 #define HUGETLB_PAGE_ORDER	(HPAGE_SHIFT - PAGE_SHIFT)
 
 #endif /* __ASSEMBLY__ */
 
 #ifdef CONFIG_HUGETLB_PAGE
 
 #define HTLB_AREA_SHIFT		40
 #define HTLB_AREA_SIZE		(1UL << HTLB_AREA_SHIFT)
 #define GET_HTLB_AREA(x)	((x) >> HTLB_AREA_SHIFT)
...
@@ -104,156 +141,6 @@ extern unsigned int HPAGE_SHIFT;
 
 #endif /* !CONFIG_HUGETLB_PAGE */
 
-/* align addr on a size boundary - adjust address up/down if needed */
-#define _ALIGN_UP(addr,size)	(((addr)+((size)-1))&(~((size)-1)))
-#define _ALIGN_DOWN(addr,size)	((addr)&(~((size)-1)))
-
-/* align addr on a size boundary - adjust address up if needed */
-#define _ALIGN(addr,size)	_ALIGN_UP(addr,size)
-
-/* to align the pointer to the (next) page boundary */
-#define PAGE_ALIGN(addr)	_ALIGN(addr, PAGE_SIZE)
-
-#ifdef __KERNEL__
-#ifndef __ASSEMBLY__
-#include <asm/cache.h>
-
-#undef STRICT_MM_TYPECHECKS
-
-#define REGION_SIZE		4UL
-#define REGION_SHIFT		60UL
-#define REGION_MASK		(((1UL<<REGION_SIZE)-1UL)<<REGION_SHIFT)
-
-static __inline__ void clear_page(void *addr)
-{
-	unsigned long lines, line_size;
-
-	line_size = ppc64_caches.dline_size;
-	lines = ppc64_caches.dlines_per_page;
-
-	__asm__ __volatile__(
-	"mtctr	%1	# clear_page\n\
-1:	dcbz	0,%0\n\
-	add	%0,%0,%3\n\
-	bdnz+	1b"
-	: "=r" (addr)
-	: "r" (lines), "0" (addr), "r" (line_size)
-	: "ctr", "memory");
-}
-
-extern void copy_4K_page(void *to, void *from);
-
-#ifdef CONFIG_PPC_64K_PAGES
-static inline void copy_page(void *to, void *from)
-{
-	unsigned int i;
-	for (i=0; i < (1 << (PAGE_SHIFT - 12)); i++) {
-		copy_4K_page(to, from);
-		to += 4096;
-		from += 4096;
-	}
-}
-#else /* CONFIG_PPC_64K_PAGES */
-static inline void copy_page(void *to, void *from)
-{
-	copy_4K_page(to, from);
-}
-#endif /* CONFIG_PPC_64K_PAGES */
-
-struct page;
-extern void clear_user_page(void *page, unsigned long vaddr, struct page *pg);
-extern void copy_user_page(void *to, void *from, unsigned long vaddr, struct page *p);
-
-#ifdef STRICT_MM_TYPECHECKS
-/*
- * These are used to make use of C type-checking.
- * Entries in the pte table are 64b, while entries in the pgd & pmd are 32b.
- */
-
-/* PTE level */
-typedef struct { unsigned long pte; } pte_t;
-#define pte_val(x)	((x).pte)
-#define __pte(x)	((pte_t) { (x) })
-
-/* 64k pages additionally define a bigger "real PTE" type that gathers
- * the "second half" part of the PTE for pseudo 64k pages
- */
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef struct { pte_t pte; } real_pte_t;
-#endif
-
-/* PMD level */
-typedef struct { unsigned long pmd; } pmd_t;
-#define pmd_val(x)	((x).pmd)
-#define __pmd(x)	((pmd_t) { (x) })
-
-/* PUD level exusts only on 4k pages */
-#ifndef CONFIG_PPC_64K_PAGES
-typedef struct { unsigned long pud; } pud_t;
-#define pud_val(x)	((x).pud)
-#define __pud(x)	((pud_t) { (x) })
-#endif
-
-/* PGD level */
-typedef struct { unsigned long pgd; } pgd_t;
-#define pgd_val(x)	((x).pgd)
-#define __pgd(x)	((pgd_t) { (x) })
-
-/* Page protection bits */
-typedef struct { unsigned long pgprot; } pgprot_t;
-#define pgprot_val(x)	((x).pgprot)
-#define __pgprot(x)	((pgprot_t) { (x) })
-
-#else
-/*
- * .. while these make it easier on the compiler
- */
-
-typedef unsigned long pte_t;
-#define pte_val(x)	(x)
-#define __pte(x)	(x)
-
-#ifdef CONFIG_PPC_64K_PAGES
-typedef struct { pte_t pte; unsigned long hidx; } real_pte_t;
-#else
-typedef unsigned long real_pte_t;
-#endif
-
-typedef unsigned long pmd_t;
-#define pmd_val(x)	(x)
-#define __pmd(x)	(x)
-
-#ifndef CONFIG_PPC_64K_PAGES
-typedef unsigned long pud_t;
-#define pud_val(x)	(x)
-#define __pud(x)	(x)
-#endif
-
-typedef unsigned long pgd_t;
-#define pgd_val(x)	(x)
-#define pgprot_val(x)	(x)
-
-typedef unsigned long pgprot_t;
-#define __pgd(x)	(x)
-#define __pgprot(x)	(x)
-
-#endif
-
-#define __pa(x)		((unsigned long)(x)-PAGE_OFFSET)
-
-extern int page_is_ram(unsigned long pfn);
-
-extern u64 ppc64_pft_size;	/* Log 2 of page table size */
-
-/* We do define AT_SYSINFO_EHDR but don't use the gate mecanism */
-#define __HAVE_ARCH_GATE_AREA	1
-
-#endif /* __ASSEMBLY__ */
-
 #ifdef MODULE
 #define __page_aligned __attribute__((__aligned__(PAGE_SIZE)))
 #else
...
@@ -262,45 +149,6 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */
 	__section__(".data.page_aligned")))
 #endif
 
-/* This must match the -Ttext linker address		*/
-/* Note: tophys & tovirt make assumptions about how	*/
-/*	KERNELBASE is defined for performance reasons.	*/
-/*	When KERNELBASE moves, those macros may have	*/
-/*	to change!					*/
-#define PAGE_OFFSET	ASM_CONST(0xC000000000000000)
-#define KERNELBASE	PAGE_OFFSET
-#define VMALLOCBASE	ASM_CONST(0xD000000000000000)
-
-#define VMALLOC_REGION_ID	(VMALLOCBASE >> REGION_SHIFT)
-#define KERNEL_REGION_ID	(KERNELBASE >> REGION_SHIFT)
-#define USER_REGION_ID		(0UL)
-#define REGION_ID(ea)		(((unsigned long)(ea)) >> REGION_SHIFT)
-
-#define __va(x) ((void *)((unsigned long)(x) + KERNELBASE))
-
-#ifdef CONFIG_FLATMEM
-#define pfn_to_page(pfn)	(mem_map + (pfn))
-#define page_to_pfn(page)	((unsigned long)((page) - mem_map))
-#define pfn_valid(pfn)		((pfn) < max_mapnr)
-#endif
-
-#define virt_to_page(kaddr)	pfn_to_page(__pa(kaddr) >> PAGE_SHIFT)
-#define pfn_to_kaddr(pfn)	__va((pfn) << PAGE_SHIFT)
-
-#define virt_addr_valid(kaddr)	pfn_valid(__pa(kaddr) >> PAGE_SHIFT)
-
-/*
- * Unfortunately the PLT is in the BSS in the PPC32 ELF ABI,
- * and needs to be executable.  This means the whole heap ends
- * up being executable.
- */
-#define VM_DATA_DEFAULT_FLAGS32	(VM_READ | VM_WRITE | VM_EXEC | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
-#define VM_DATA_DEFAULT_FLAGS64	(VM_READ | VM_WRITE | \
-				 VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
-
 #define VM_DATA_DEFAULT_FLAGS \
 	(test_thread_flag(TIF_32BIT) ? \
 	 VM_DATA_DEFAULT_FLAGS32 : VM_DATA_DEFAULT_FLAGS64)
...
@@ -321,8 +169,6 @@ extern u64 ppc64_pft_size; /* Log 2 of page table size */
 	(test_thread_flag(TIF_32BIT) ? \
 	 VM_STACK_DEFAULT_FLAGS32 : VM_STACK_DEFAULT_FLAGS64)
 
-#endif /* __KERNEL__ */
-
 #include <asm-generic/page.h>
 
-#endif /* _PPC64_PAGE_H */
+#endif /* _ASM_POWERPC_PAGE_64_H */
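With CONFIG_PPC_64K_PAGES the Linux page is 64K while the hardware page stays 4K, so copy_page() calls copy_4K_page() 1 << (PAGE_SHIFT - 12) times and clear_page() issues one dcbz per data-cache line. A quick arithmetic check of those loop counts; the 128-byte line size is only an example value, the real one comes from ppc64_caches:

	#include <stdio.h>

	int main(void)
	{
		unsigned int page_shift = 16;		/* CONFIG_PPC_64K_PAGES */
		unsigned int hw_page_shift = 12;	/* HW_PAGE_SHIFT is always 4K */
		unsigned int dline_size = 128;		/* example L1 D-cache line size */

		/* copy_page(): number of copy_4K_page() calls per Linux page */
		printf("copy_4K_page calls per page: %u\n",
		       1u << (page_shift - hw_page_shift));
		/* clear_page(): number of dcbz iterations (dlines_per_page) */
		printf("dcbz iterations per page:    %u\n",
		       (1u << page_shift) / dline_size);
		return 0;	/* prints 16 and 512 */
	}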
include/asm-ppc64/serial.h → include/asm-powerpc/serial.h
 /*
- * include/asm-ppc64/serial.h
- */
-#ifndef _PPC64_SERIAL_H
-#define _PPC64_SERIAL_H
-
-/*
- * This assumes you have a 1.8432 MHz clock for your UART.
- *
- * It'd be nice if someone built a serial card with a 24.576 MHz
- * clock, since the 16550A is capable of handling a top speed of 1.5
- * megabits/second; but this requires the faster clock.
- *
  * This program is free software; you can redistribute it and/or
  * modify it under the terms of the GNU General Public License
  * as published by the Free Software Foundation; either version
  * 2 of the License, or (at your option) any later version.
  */
+#ifndef _ASM_POWERPC_SERIAL_H
+#define _ASM_POWERPC_SERIAL_H
+
+/*
+ * Serial ports are not listed here, because they are discovered
+ * through the device tree.
+ */
 
 /* Default baud base if not found in device-tree */
 #define BASE_BAUD ( 1843200 / 16 )
...
include/asm-powerpc/vdso_datapage.h
...
@@ -73,7 +73,7 @@ struct vdso_data {
 	/* those additional ones don't have to be located anywhere
 	 * special as they were not part of the original systemcfg
 	 */
-	__s64 wtom_clock_sec;			/* Wall to monotonic clock */
+	__s32 wtom_clock_sec;			/* Wall to monotonic clock */
 	__s32 wtom_clock_nsec;
 	__u32 syscall_map_64[SYSCALL_MAP_SIZE]; /* map of syscalls */
 	__u32 syscall_map_32[SYSCALL_MAP_SIZE]; /* map of syscalls */
...
include/asm-ppc/nvram.h
deleted
100644 → 0
/*
* PreP compliant NVRAM access
*/
#ifdef __KERNEL__
#ifndef _PPC_NVRAM_H
#define _PPC_NVRAM_H
#define NVRAM_AS0 0x74
#define NVRAM_AS1 0x75
#define NVRAM_DATA 0x77
/* RTC Offsets */
#define MOTO_RTC_SECONDS 0x1FF9
#define MOTO_RTC_MINUTES 0x1FFA
#define MOTO_RTC_HOURS 0x1FFB
#define MOTO_RTC_DAY_OF_WEEK 0x1FFC
#define MOTO_RTC_DAY_OF_MONTH 0x1FFD
#define MOTO_RTC_MONTH 0x1FFE
#define MOTO_RTC_YEAR 0x1FFF
#define MOTO_RTC_CONTROLA 0x1FF8
#define MOTO_RTC_CONTROLB 0x1FF9
/* PowerMac specific nvram stuffs */
enum {
	pmac_nvram_OF,		/* Open Firmware partition */
	pmac_nvram_XPRAM,	/* MacOS XPRAM partition */
	pmac_nvram_NR		/* MacOS Name Registry partition */
};

/* Return partition offset in nvram */
extern int	pmac_get_partition(int partition);

/* Direct access to XPRAM on PowerMacs */
extern u8	pmac_xpram_read(int xpaddr);
extern void	pmac_xpram_write(int xpaddr, u8 data);

/* Synchronize NVRAM */
extern void	nvram_sync(void);

/* Normal access to NVRAM */
extern unsigned char	nvram_read_byte(int i);
extern void		nvram_write_byte(unsigned char c, int i);

/* Some offsets in XPRAM */
#define PMAC_XPRAM_MACHINE_LOC	0xe4
#define PMAC_XPRAM_SOUND_VOLUME	0x08

/* Machine location structure in PowerMac XPRAM */
struct pmac_machine_location {
	unsigned int	latitude;	/* 2+30 bit Fractional number */
	unsigned int	longitude;	/* 2+30 bit Fractional number */
	unsigned int	delta;		/* mix of GMT delta and DLS */
};

/*
 * /dev/nvram ioctls
 *
 * Note that PMAC_NVRAM_GET_OFFSET is still supported, but is
 * definitely obsolete. Do not use it if you can avoid it
 */

#define OBSOLETE_PMAC_NVRAM_GET_OFFSET \
				_IOWR('p', 0x40, int)

#define IOC_NVRAM_GET_OFFSET	_IOWR('p', 0x42, int)	/* Get NVRAM partition offset */
#define IOC_NVRAM_SYNC		_IO('p', 0x43)		/* Sync NVRAM image */

#endif
#endif /* __KERNEL__ */
include/asm-ppc64/prom.h
deleted
100644 → 0
#ifndef _PPC64_PROM_H
#define _PPC64_PROM_H
/*
* Definitions for talking to the Open Firmware PROM on
* Power Macintosh computers.
*
* Copyright (C) 1996 Paul Mackerras.
*
* Updates for PPC64 by Peter Bergner & David Engebretsen, IBM Corp.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/proc_fs.h>
#include <asm/atomic.h>
#define PTRRELOC(x) ((typeof(x))((unsigned long)(x) - offset))
#define PTRUNRELOC(x) ((typeof(x))((unsigned long)(x) + offset))
#define RELOC(x) (*PTRRELOC(&(x)))
/* Definitions used by the flattened device tree */
#define OF_DT_HEADER		0xd00dfeed	/* marker */
#define OF_DT_BEGIN_NODE	0x1		/* Start of node, full name */
#define OF_DT_END_NODE		0x2		/* End node */
#define OF_DT_PROP		0x3		/* Property: name off, size,
						 * content */
#define OF_DT_NOP		0x4		/* nop */
#define OF_DT_END		0x9

#define OF_DT_VERSION		0x10

/*
 * This is what gets passed to the kernel by prom_init or kexec
 *
 * The dt struct contains the device tree structure, full pathes and
 * property contents. The dt strings contain a separate block with just
 * the strings for the property names, and is fully page aligned and
 * self contained in a page, so that it can be kept around by the kernel,
 * each property name appears only once in this page (cheap compression)
 *
 * the mem_rsvmap contains a map of reserved ranges of physical memory,
 * passing it here instead of in the device-tree itself greatly simplifies
 * the job of everybody. It's just a list of u64 pairs (base/size) that
 * ends when size is 0
 */
struct boot_param_header
{
	u32	magic;			/* magic word OF_DT_HEADER */
	u32	totalsize;		/* total size of DT block */
	u32	off_dt_struct;		/* offset to structure */
	u32	off_dt_strings;		/* offset to strings */
	u32	off_mem_rsvmap;		/* offset to memory reserve map */
	u32	version;		/* format version */
	u32	last_comp_version;	/* last compatible version */
	/* version 2 fields below */
	u32	boot_cpuid_phys;	/* Physical CPU id we're booting on */
	/* version 3 fields below */
	u32	dt_strings_size;	/* size of the DT strings block */
};

typedef u32 phandle;
typedef u32 ihandle;

struct address_range {
	unsigned long space;
	unsigned long address;
	unsigned long size;
};

struct interrupt_info {
	int	line;
	int	sense;		/* +ve/-ve logic, edge or level, etc. */
};

struct pci_address {
	u32 a_hi;
	u32 a_mid;
	u32 a_lo;
};

struct isa_address {
	u32 a_hi;
	u32 a_lo;
};

struct isa_range {
	struct isa_address isa_addr;
	struct pci_address pci_addr;
	unsigned int size;
};

struct reg_property {
	unsigned long address;
	unsigned long size;
};

struct reg_property32 {
	unsigned int address;
	unsigned int size;
};

struct reg_property64 {
	unsigned long address;
	unsigned long size;
};

struct property {
	char	*name;
	int	length;
	unsigned char *value;
	struct property *next;
};

struct device_node {
	char	*name;
	char	*type;
	phandle	node;
	phandle linux_phandle;
	int	n_addrs;
	struct	address_range *addrs;
	int	n_intrs;
	struct	interrupt_info *intrs;
	char	*full_name;

	struct	property *properties;
	struct	device_node *parent;
	struct	device_node *child;
	struct	device_node *sibling;
	struct	device_node *next;	/* next device of same type */
	struct	device_node *allnext;	/* next in list of all nodes */
	struct	proc_dir_entry *pde;	/* this node's proc directory */
	struct	kref kref;
	unsigned long _flags;
	void	*data;
#ifdef CONFIG_PPC_ISERIES
	struct list_head Device_List;
#endif
};

extern struct device_node *of_chosen;

/* flag descriptions */
#define OF_DYNAMIC 1 /* node and properties were allocated via kmalloc */

#define OF_IS_DYNAMIC(x) test_bit(OF_DYNAMIC, &x->_flags)
#define OF_MARK_DYNAMIC(x) set_bit(OF_DYNAMIC, &x->_flags)

/*
 * Until 32-bit ppc can add proc_dir_entries to its device_node
 * definition, we cannot refer to pde, name_link, and addr_link
 * in arch-independent code.
 */
#define HAVE_ARCH_DEVTREE_FIXUPS

static inline void set_node_proc_entry(struct device_node *dn,
					struct proc_dir_entry *de)
{
	dn->pde = de;
}

/* OBSOLETE: Old stlye node lookup */
extern struct device_node *find_devices(const char *name);
extern struct device_node *find_type_devices(const char *type);
extern struct device_node *find_path_device(const char *path);
extern struct device_node *find_compatible_devices(const char *type,
						   const char *compat);
extern struct device_node *find_all_nodes(void);

/* New style node lookup */
extern struct device_node *of_find_node_by_name(struct device_node *from,
	const char *name);
extern struct device_node *of_find_node_by_type(struct device_node *from,
	const char *type);
extern struct device_node *of_find_compatible_node(struct device_node *from,
	const char *type, const char *compat);
extern struct device_node *of_find_node_by_path(const char *path);
extern struct device_node *of_find_node_by_phandle(phandle handle);
extern struct device_node *of_find_all_nodes(struct device_node *prev);
extern struct device_node *of_get_parent(const struct device_node *node);
extern struct device_node *of_get_next_child(const struct device_node *node,
					     struct device_node *prev);
extern struct device_node *of_node_get(struct device_node *node);
extern void of_node_put(struct device_node *node);

/* For scanning the flat device-tree at boot time */
int __init of_scan_flat_dt(int (*it)(unsigned long node,
				     const char *uname, int depth,
				     void *data),
			   void *data);
void* __init of_get_flat_dt_prop(unsigned long node, const char *name,
				 unsigned long *size);

/* For updating the device tree at runtime */
extern void of_attach_node(struct device_node *);
extern void of_detach_node(const struct device_node *);

/* Other Prototypes */
extern unsigned long prom_init(unsigned long, unsigned long, unsigned long,
	unsigned long, unsigned long);
extern void finish_device_tree(void);
extern void unflatten_device_tree(void);
extern void early_init_devtree(void *);
extern int device_is_compatible(struct device_node *device, const char *);
extern int machine_is_compatible(const char *compat);
extern unsigned char *get_property(struct device_node *node, const char *name,
				   int *lenp);
extern void print_properties(struct device_node *node);
extern int prom_n_addr_cells(struct device_node *np);
extern int prom_n_size_cells(struct device_node *np);
extern int prom_n_intr_cells(struct device_node *np);
extern void prom_get_irq_senses(unsigned char *senses, int off, int max);
extern int prom_add_property(struct device_node *np, struct property *prop);

#endif /* _PPC64_PROM_H */
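The header comment above describes the memory reserve map handed over by prom_init or kexec as a flat list of u64 (base, size) pairs that ends when size is 0. A userspace sketch of walking such a list; the entries are made up, real data sits at off_mem_rsvmap in the flattened-tree blob:

	#include <stdint.h>
	#include <stdio.h>

	/* Walk a flattened-device-tree style reserve map: (base, size) u64
	 * pairs, terminated by an entry whose size is 0. */
	static void walk_rsvmap(const uint64_t *rsvmap)
	{
		int i;

		for (i = 0; ; i++) {
			uint64_t base = rsvmap[2 * i];
			uint64_t size = rsvmap[2 * i + 1];

			if (size == 0)
				break;
			printf("reserved: base=%#llx size=%#llx\n",
			       (unsigned long long)base, (unsigned long long)size);
		}
	}

	int main(void)
	{
		/* Made-up example entries. */
		const uint64_t rsvmap[] = {
			0x00000000, 0x4000,	/* first 16K reserved */
			0x01c00000, 0x100000,	/* 1M firmware area */
			0, 0			/* terminator: size == 0 */
		};

		walk_rsvmap(rsvmap);
		return 0;
	}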
include/asm-ppc64/system.h
deleted
100644 → 0
#ifndef __PPC64_SYSTEM_H
#define __PPC64_SYSTEM_H
/*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*/
#include <linux/config.h>
#include <linux/compiler.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/hw_irq.h>
#include <asm/synch.h>
/*
* Memory barrier.
* The sync instruction guarantees that all memory accesses initiated
* by this processor have been performed (with respect to all other
* mechanisms that access memory). The eieio instruction is a barrier
* providing an ordering (separately) for (a) cacheable stores and (b)
* loads and stores to non-cacheable memory (e.g. I/O devices).
*
* mb() prevents loads and stores being reordered across this point.
* rmb() prevents loads being reordered across this point.
* wmb() prevents stores being reordered across this point.
* read_barrier_depends() prevents data-dependent loads being reordered
* across this point (nop on PPC).
*
* We have to use the sync instructions for mb(), since lwsync doesn't
* order loads with respect to previous stores. Lwsync is fine for
* rmb(), though.
* For wmb(), we use sync since wmb is used in drivers to order
* stores to system memory with respect to writes to the device.
* However, smp_wmb() can be a lighter-weight eieio barrier on
* SMP since it is only used to order updates to system memory.
*/
#define mb() __asm__ __volatile__ ("sync" : : : "memory")
#define rmb() __asm__ __volatile__ ("lwsync" : : : "memory")
#define wmb() __asm__ __volatile__ ("sync" : : : "memory")
#define read_barrier_depends() do { } while(0)
#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
#define set_wmb(var, value) do { var = value; smp_wmb(); } while (0)
#ifdef CONFIG_SMP
#define smp_mb() mb()
#define smp_rmb() rmb()
#define smp_wmb() eieio()
#define smp_read_barrier_depends() read_barrier_depends()
#else
#define smp_mb() __asm__ __volatile__("": : :"memory")
#define smp_rmb() __asm__ __volatile__("": : :"memory")
#define smp_wmb() __asm__ __volatile__("": : :"memory")
#define smp_read_barrier_depends() do { } while(0)
#endif /* CONFIG_SMP */
#ifdef __KERNEL__
struct task_struct;
struct pt_regs;

#ifdef CONFIG_DEBUGGER

extern int (*__debugger)(struct pt_regs *regs);
extern int (*__debugger_ipi)(struct pt_regs *regs);
extern int (*__debugger_bpt)(struct pt_regs *regs);
extern int (*__debugger_sstep)(struct pt_regs *regs);
extern int (*__debugger_iabr_match)(struct pt_regs *regs);
extern int (*__debugger_dabr_match)(struct pt_regs *regs);
extern int (*__debugger_fault_handler)(struct pt_regs *regs);

#define DEBUGGER_BOILERPLATE(__NAME) \
static inline int __NAME(struct pt_regs *regs) \
{ \
	if (unlikely(__ ## __NAME)) \
		return __ ## __NAME(regs); \
	return 0; \
}

DEBUGGER_BOILERPLATE(debugger)
DEBUGGER_BOILERPLATE(debugger_ipi)
DEBUGGER_BOILERPLATE(debugger_bpt)
DEBUGGER_BOILERPLATE(debugger_sstep)
DEBUGGER_BOILERPLATE(debugger_iabr_match)
DEBUGGER_BOILERPLATE(debugger_dabr_match)
DEBUGGER_BOILERPLATE(debugger_fault_handler)

#ifdef CONFIG_XMON
extern void xmon_init(int enable);
#endif

#else
static inline int debugger(struct pt_regs *regs) { return 0; }
static inline int debugger_ipi(struct pt_regs *regs) { return 0; }
static inline int debugger_bpt(struct pt_regs *regs) { return 0; }
static inline int debugger_sstep(struct pt_regs *regs) { return 0; }
static inline int debugger_iabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_dabr_match(struct pt_regs *regs) { return 0; }
static inline int debugger_fault_handler(struct pt_regs *regs) { return 0; }
#endif

extern int set_dabr(unsigned long dabr);
extern void _exception(int signr, struct pt_regs *regs, int code,
		       unsigned long addr);
extern int fix_alignment(struct pt_regs *regs);
extern void bad_page_fault(struct pt_regs *regs, unsigned long address,
			   int sig);
extern void show_regs(struct pt_regs *regs);
extern void low_hash_fault(struct pt_regs *regs, unsigned long address);
extern int die(const char *str, struct pt_regs *regs, long err);

extern int _get_PVR(void);
extern void giveup_fpu(struct task_struct *);
extern void disable_kernel_fp(void);
extern void flush_fp_to_thread(struct task_struct *);
extern void enable_kernel_fp(void);
extern void giveup_altivec(struct task_struct *);
extern void disable_kernel_altivec(void);
extern void enable_kernel_altivec(void);
extern int emulate_altivec(struct pt_regs *);
extern void cvt_fd(float *from, double *to, struct thread_struct *thread);
extern void cvt_df(double *from, float *to, struct thread_struct *thread);

#ifdef CONFIG_ALTIVEC
extern void flush_altivec_to_thread(struct task_struct *);
#else
static inline void flush_altivec_to_thread(struct task_struct *t)
{
}
#endif

static inline void flush_spe_to_thread(struct task_struct *t)
{
}

extern int mem_init_done;	/* set on boot once kmalloc can be called */
extern unsigned long memory_limit;

/* EBCDIC -> ASCII conversion for [0-9A-Z] on iSeries */
extern unsigned char e2a(unsigned char);

extern struct task_struct *__switch_to(struct task_struct *,
				       struct task_struct *);
#define switch_to(prev, next, last)	((last) = __switch_to((prev), (next)))

struct thread_struct;
extern struct task_struct *_switch(struct thread_struct *prev,
				   struct thread_struct *next);

extern unsigned long klimit;

extern int powersave_nap;	/* set if nap mode can be used in idle loop */

/*
 * Atomic exchange
 *
 * Changes the memory location '*ptr' to be val and returns
 * the previous value stored there.
 *
 * Inline asm pulled from arch/ppc/kernel/misc.S so ppc64
 * is more like most of the other architectures.
 */
static __inline__ unsigned long
__xchg_u32(volatile unsigned int *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%3		# __xchg_u32\n\
	stwcx.	%2,0,%3\n\
2:	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

static __inline__ unsigned long
__xchg_u64(volatile long *m, unsigned long val)
{
	unsigned long dummy;

	__asm__ __volatile__(
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%3		# __xchg_u64\n\
	stdcx.	%2,0,%3\n\
2:	bne-	1b"
	ISYNC_ON_SMP
	: "=&r" (dummy), "=m" (*m)
	: "r" (val), "r" (m)
	: "cc", "memory");

	return (dummy);
}

/*
 * This function doesn't exist, so you'll get a linker error
 * if something tries to do an invalid xchg().
 */
extern void __xchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__xchg(volatile void *ptr, unsigned long x, unsigned int size)
{
	switch (size) {
	case 4:
		return __xchg_u32(ptr, x);
	case 8:
		return __xchg_u64(ptr, x);
	}
	__xchg_called_with_bad_pointer();
	return x;
}

#define xchg(ptr,x)							     \
  ({									     \
     __typeof__(*(ptr)) _x_ = (x);					     \
     (__typeof__(*(ptr))) __xchg((ptr), (unsigned long)_x_, sizeof(*(ptr))); \
  })

#define tas(ptr) (xchg((ptr),1))

#define __HAVE_ARCH_CMPXCHG	1

static __inline__ unsigned long
__cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
{
	unsigned int prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	lwarx	%0,0,%2		# __cmpxchg_u32\n\
	cmpw	0,%0,%3\n\
	bne-	2f\n\
	stwcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

static __inline__ unsigned long
__cmpxchg_u64(volatile unsigned long *p, unsigned long old, unsigned long new)
{
	unsigned long prev;

	__asm__ __volatile__ (
	EIEIO_ON_SMP
"1:	ldarx	%0,0,%2		# __cmpxchg_u64\n\
	cmpd	0,%0,%3\n\
	bne-	2f\n\
	stdcx.	%4,0,%2\n\
	bne-	1b"
	ISYNC_ON_SMP
	"\n\
2:"
	: "=&r" (prev), "=m" (*p)
	: "r" (p), "r" (old), "r" (new), "m" (*p)
	: "cc", "memory");

	return prev;
}

/* This function doesn't exist, so you'll get a linker error
   if something tries to do an invalid cmpxchg().  */
extern void __cmpxchg_called_with_bad_pointer(void);

static __inline__ unsigned long
__cmpxchg(volatile void *ptr, unsigned long old, unsigned long new,
	  unsigned int size)
{
	switch (size) {
	case 4:
		return __cmpxchg_u32(ptr, old, new);
	case 8:
		return __cmpxchg_u64(ptr, old, new);
	}
	__cmpxchg_called_with_bad_pointer();
	return old;
}

#define cmpxchg(ptr,o,n)\
	((__typeof__(*(ptr)))__cmpxchg((ptr),(unsigned long)(o),\
	(unsigned long)(n),sizeof(*(ptr))))

/*
 * We handle most unaligned accesses in hardware. On the other hand
 * unaligned DMA can be very expensive on some ppc64 IO chips (it does
 * powers of 2 writes until it reaches sufficient alignment).
 *
 * Based on this we disable the IP header alignment in network drivers.
 */
#define NET_IP_ALIGN	0

#define arch_align_stack(x) (x)

extern unsigned long reloc_offset(void);

#endif /* __KERNEL__ */
#endif
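The barrier comment in this deleted header explains why smp_wmb() can be the cheaper eieio: it only has to order stores to system memory, the classic case being publishing data before setting a flag. A userspace sketch of that pattern, with __atomic_thread_fence() standing in for the kernel macros; this is an analogy for illustration, not the kernel implementation:

	#include <pthread.h>
	#include <stdio.h>

	/* Userspace stand-ins for the kernel barriers described above:
	 * smp_wmb() ~ release fence (eieio on ppc64 SMP),
	 * smp_rmb() ~ acquire fence (lwsync on ppc64 SMP). */
	#define smp_wmb()	__atomic_thread_fence(__ATOMIC_RELEASE)
	#define smp_rmb()	__atomic_thread_fence(__ATOMIC_ACQUIRE)

	static int payload;
	static volatile int ready;

	static void *producer(void *arg)
	{
		payload = 42;	/* 1: fill in the data */
		smp_wmb();	/* 2: order the data store before... */
		ready = 1;	/*    ...publishing the flag */
		return arg;
	}

	static void *consumer(void *arg)
	{
		while (!ready)
			;	/* spin until the flag is visible */
		smp_rmb();	/* order the flag read before the data read */
		printf("payload = %d\n", payload);
		return arg;
	}

	int main(void)
	{
		pthread_t p, c;

		pthread_create(&c, NULL, consumer, NULL);
		pthread_create(&p, NULL, producer, NULL);
		pthread_join(p, NULL);
		pthread_join(c, NULL);
		return 0;
	}

cmpxchg() itself retries inside a lwarx/stwcx. loop, and callers typically wrap it in their own read-modify-write retry loop until the compare-and-swap succeeds. A userspace analogue built on GCC's __sync_val_compare_and_swap(), which here stands in for the kernel primitive:

	#include <stdio.h>

	/* Stand-in for the kernel's cmpxchg(ptr, old, new): returns the value
	 * that was actually in *ptr; the swap happened iff that equals old. */
	#define cmpxchg(ptr, o, n)	__sync_val_compare_and_swap((ptr), (o), (n))

	/* Typical caller pattern: retry until nobody raced with us. */
	static unsigned long atomic_add_return(volatile unsigned long *v,
					       unsigned long a)
	{
		unsigned long old, new;

		do {
			old = *v;
			new = old + a;
		} while (cmpxchg(v, old, new) != old);

		return new;
	}

	int main(void)
	{
		volatile unsigned long counter = 0;

		printf("%lu\n", atomic_add_return(&counter, 5));	/* 5 */
		printf("%lu\n", atomic_add_return(&counter, 7));	/* 12 */
		return 0;
	}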