Commit ffbf0ab1 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/gerg/m68knommu:
  m68k: merge m68k and m68knommu arch directories
parents 73939bb5 66d857b0
config M68K
	bool
	default y
-	select HAVE_AOUT
	select HAVE_IDE
-	select GENERIC_ATOMIC64
+	select HAVE_AOUT if MMU
+	select GENERIC_ATOMIC64 if MMU
+	select HAVE_GENERIC_HARDIRQS if !MMU
+	select GENERIC_HARDIRQS_NO_DEPRECATED if !MMU

-config MMU
-	bool
-	default y

config RWSEM_GENERIC_SPINLOCK
	bool
@@ -34,457 +32,67 @@ config TIME_LOW_RES
	bool
	default y
config GENERIC_IOMAP
bool
default y
config ARCH_MAY_HAVE_PC_FDC
bool
depends on BROKEN && (Q40 || SUN3X)
default y
config NO_IOPORT
	def_bool y

config NO_DMA
-	def_bool SUN3
+	def_bool (MMU && SUN3) || (!MMU && !COLDFIRE)
config ZONE_DMA
bool
default y
config HZ
	int
+	default 1000 if CLEOPATRA
	default 100
config ARCH_USES_GETTIMEOFFSET
def_bool y
source "init/Kconfig" source "init/Kconfig"
source "kernel/Kconfig.freezer" source "kernel/Kconfig.freezer"
menu "Platform dependent setup" config MMU
bool "MMU-based Paged Memory Management Support"
config EISA
bool
---help---
The Extended Industry Standard Architecture (EISA) bus was
developed as an open alternative to the IBM MicroChannel bus.
The EISA bus provided some of the features of the IBM MicroChannel
bus while maintaining backward compatibility with cards made for
the older ISA bus. The EISA bus saw limited use between 1988 and
1995 when it was made obsolete by the PCI bus.
Say Y here if you are building a kernel for an EISA-based machine.
Otherwise, say N.
config MCA
bool
help
MicroChannel Architecture is found in some IBM PS/2 machines and
laptops. It is a bus system similar to PCI or ISA. See
<file:Documentation/mca.txt> (and especially the web page given
there) before attempting to build an MCA bus kernel.
config PCMCIA
tristate
---help---
Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
computer. These are credit-card size devices such as network cards,
modems or hard drives often used with laptops computers. There are
actually two varieties of these cards: the older 16 bit PCMCIA cards
and the newer 32 bit CardBus cards. If you want to use CardBus
cards, you need to say Y here and also to "CardBus support" below.
To use your PC-cards, you will need supporting software from David
Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
for location). Please also read the PCMCIA-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
To compile this driver as modules, choose M here: the
modules will be called pcmcia_core and ds.
config AMIGA
bool "Amiga support"
select MMU_MOTOROLA if MMU
help
This option enables support for the Amiga series of computers. If
you plan to use this kernel on an Amiga, say Y here and browse the
material available in <file:Documentation/m68k>; otherwise say N.
config ATARI
bool "Atari support"
select MMU_MOTOROLA if MMU
help
This option enables support for the 68000-based Atari series of
computers (including the TT, Falcon and Medusa). If you plan to use
this kernel on an Atari, say Y here and browse the material
available in <file:Documentation/m68k>; otherwise say N.
config MAC
bool "Macintosh support"
select MMU_MOTOROLA if MMU
help
This option enables support for the Apple Macintosh series of
computers (yes, there is experimental support now, at least for part
of the series).
Say N unless you're willing to code the remaining necessary support.
;)
config NUBUS
bool
depends on MAC
default y
config M68K_L2_CACHE
bool
depends on MAC
default y
config APOLLO
bool "Apollo support"
select MMU_MOTOROLA if MMU
help
Say Y here if you want to run Linux on an MC680x0-based Apollo
Domain workstation such as the DN3500.
config VME
bool "VME (Motorola and BVM) support"
select MMU_MOTOROLA if MMU
help
Say Y here if you want to build a kernel for a 680x0 based VME
board. Boards currently supported include Motorola boards MVME147,
MVME162, MVME166, MVME167, MVME172, and MVME177. BVME4000 and
BVME6000 boards from BVM Ltd are also supported.
config MVME147
bool "MVME147 support"
depends on VME
help
Say Y to include support for early Motorola VME boards. This will
build a kernel which can run on MVME147 single-board computers. If
you select this option you will have to select the appropriate
drivers for SCSI, Ethernet and serial ports later on.
config MVME16x
bool "MVME162, 166 and 167 support"
depends on VME
help
Say Y to include support for Motorola VME boards. This will build a
kernel which can run on MVME162, MVME166, MVME167, MVME172, and
MVME177 boards. If you select this option you will have to select
the appropriate drivers for SCSI, Ethernet and serial ports later
on.
config BVME6000
bool "BVME4000 and BVME6000 support"
depends on VME
help
Say Y to include support for VME boards from BVM Ltd. This will
build a kernel which can run on BVME4000 and BVME6000 boards. If
you select this option you will have to select the appropriate
drivers for SCSI, Ethernet and serial ports later on.
config HP300
bool "HP9000/300 and HP9000/400 support"
select MMU_MOTOROLA if MMU
help
This option enables support for the HP9000/300 and HP9000/400 series
of workstations. Support for these machines is still somewhat
experimental. If you plan to try to use the kernel on such a machine
say Y here.
Everybody else says N.
config DIO
bool "DIO bus support"
depends on HP300
	default y
	help
-	  Say Y here to enable support for the "DIO" expansion bus used in
-	  HP300 machines. If you are using such a system you almost certainly
-	  want this.
+	  Select if you want MMU-based virtualised addressing space
+	  support by paged memory management. If unsure, say 'Y'.
config SUN3X
bool "Sun3x support"
select MMU_MOTOROLA if MMU
select M68030
help
This option enables support for the Sun 3x series of workstations.
Be warned that this support is very experimental.
Note that Sun 3x kernels are not compatible with Sun 3 hardware.
General Linux information on the Sun 3x series (now discontinued)
is at <http://www.angelfire.com/ca2/tech68k/sun3.html>.
If you don't want to compile a kernel for a Sun 3x, say N.
config Q40
bool "Q40/Q60 support"
select MMU_MOTOROLA if MMU
help
The Q40 is a Motorola 68040-based successor to the Sinclair QL
manufactured in Germany. There is an official Q40 home page at
<http://www.q40.de/>. This option enables support for the Q40 and
Q60. Select your CPU below. For 68LC060 don't forget to enable FPU
emulation.
config SUN3
bool "Sun3 support"
depends on !MMU_MOTOROLA
select MMU_SUN3 if MMU
select M68020
help
This option enables support for the Sun 3 series of workstations
(3/50, 3/60, 3/1xx, 3/2xx systems). Enabling this option requires
that all other hardware types must be disabled, as Sun 3 kernels
are incompatible with all other m68k targets (including Sun 3x!).
If you don't want to compile a kernel exclusively for a Sun 3, say N.
config NATFEAT
bool "ARAnyM emulator support"
depends on ATARI
help
This option enables support for ARAnyM native features, such as
access to a disk image as /dev/hda.
config NFBLOCK
tristate "NatFeat block device support"
depends on BLOCK && NATFEAT
help
Say Y to include support for the ARAnyM NatFeat block device
which allows direct access to the hard drives without using
the hardware emulation.
config NFCON
tristate "NatFeat console driver"
depends on NATFEAT
help
Say Y to include support for the ARAnyM NatFeat console driver
which allows the console output to be redirected to the stderr
output of ARAnyM.
config NFETH
tristate "NatFeat Ethernet support"
depends on NET_ETHERNET && NATFEAT
help
Say Y to include support for the ARAnyM NatFeat network device
which will emulate a regular ethernet device while presenting an
ethertap device to the host system.
comment "Processor type"
config M68020
bool "68020 support"
help
If you anticipate running this kernel on a computer with a MC68020
processor, say Y. Otherwise, say N. Note that the 68020 requires a
68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
Sun 3, which provides its own version.
config M68030
bool "68030 support"
depends on !MMU_SUN3
help
If you anticipate running this kernel on a computer with a MC68030
processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
work, as it does not include an MMU (Memory Management Unit).
config M68040
bool "68040 support"
depends on !MMU_SUN3
help
If you anticipate running this kernel on a computer with a MC68LC040
or MC68040 processor, say Y. Otherwise, say N. Note that an
MC68EC040 will not work, as it does not include an MMU (Memory
Management Unit).
config M68060
bool "68060 support"
depends on !MMU_SUN3
help
If you anticipate running this kernel on a computer with a MC68060
processor, say Y. Otherwise, say N.
config MMU_MOTOROLA
bool
config MMU_SUN3
bool
depends on MMU && !MMU_MOTOROLA
config M68KFPU_EMU
bool "Math emulation support (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
At some point in the future, this will cause floating-point math
instructions to be emulated by the kernel on machines that lack a
floating-point math coprocessor. Thrill-seekers and chronically
sleep-deprived psychotic hacker types can say Y now, everyone else
should probably wait a while.
config M68KFPU_EMU_EXTRAPREC
bool "Math emulation extra precision"
depends on M68KFPU_EMU
help
The fpu uses normally a few bit more during calculations for
correct rounding, the emulator can (often) do the same but this
extra calculation can cost quite some time, so you can disable
it here. The emulator will then "only" calculate with a 64 bit
mantissa and round slightly incorrect, what is more than enough
for normal usage.
config M68KFPU_EMU_ONLY
bool "Math emulation only kernel"
depends on M68KFPU_EMU
help
This option prevents any floating-point instructions from being
compiled into the kernel, thereby the kernel doesn't save any
floating point context anymore during task switches, so this
kernel will only be usable on machines without a floating-point
math coprocessor. This makes the kernel a bit faster as no tests
needs to be executed whether a floating-point instruction in the
kernel should be executed or not.
config ADVANCED
bool "Advanced configuration options"
---help---
This gives you access to some advanced options for the CPU. The
defaults should be fine for most users, but these options may make
it possible for you to improve performance somewhat if you know what
you are doing.
Note that the answer to this question won't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about these options.
-	  Most users should say N to this question.
+
+menu "Platform dependent setup"
config RMW_INSNS
bool "Use read-modify-write instructions"
depends on ADVANCED
---help---
This allows to use certain instructions that work with indivisible
read-modify-write bus cycles. While this is faster than the
workaround of disabling interrupts, it can conflict with DMA
( = direct memory access) on many Amiga systems, and it is also said
to destabilize other machines. It is very likely that this will
cause serious problems on any Amiga or Atari Medusa if set. The only
configuration where it should work are 68030-based Ataris, where it
apparently improves performance. But you've been warned! Unless you
really know what you are doing, say N. Try Y only if you're quite
adventurous.
config SINGLE_MEMORY_CHUNK
bool "Use one physical chunk of memory only" if ADVANCED && !SUN3
default y if SUN3
select NEED_MULTIPLE_NODES
help
Ignore all but the first contiguous chunk of physical memory for VM
purposes. This will save a few bytes kernel size and may speed up
some operations. Say N if not sure.
-config 060_WRITETHROUGH
-	bool "Use write-through caching for 68060 supervisor accesses"
-	depends on ADVANCED && M68060
-	---help---
-	  The 68060 generally uses copyback caching of recently accessed data.
-	  Copyback caching means that memory writes will be held in an on-chip
-	  cache and only written back to memory some time later. Saying Y
-	  here will force supervisor (kernel) accesses to use writethrough
-	  caching. Writethrough caching means that data is written to memory
-	  straight away, so that cache and memory data always agree.
-	  Writethrough caching is less efficient, but is needed for some
-	  drivers on 68060 based systems where the 68060 bus snooping signal
-	  is hardwired on. The 53c710 SCSI driver is known to suffer from
-	  this problem.
+if MMU
+source arch/m68k/Kconfig.mmu
+endif
+
+if !MMU
+source arch/m68k/Kconfig.nommu
+endif
config ARCH_DISCONTIGMEM_ENABLE
def_bool !SINGLE_MEMORY_CHUNK
config NODES_SHIFT
int
default "3"
depends on !SINGLE_MEMORY_CHUNK
source "mm/Kconfig" source "mm/Kconfig"
endmenu endmenu
menu "General setup" menu "Executable file formats"
source "fs/Kconfig.binfmt" source "fs/Kconfig.binfmt"
config ZORRO endmenu
bool "Amiga Zorro (AutoConfig) bus support"
depends on AMIGA
help
This enables support for the Zorro bus in the Amiga. If you have
expansion cards in your Amiga that conform to the Amiga
AutoConfig(tm) specification, say Y, otherwise N. Note that even
expansion cards that do not fit in the Zorro slots but fit in e.g.
the CPU slot may fall in this category, so you have to say Y to let
Linux use these.
config AMIGA_PCMCIA
bool "Amiga 1200/600 PCMCIA support (EXPERIMENTAL)"
depends on AMIGA && EXPERIMENTAL
help
Include support in the kernel for pcmcia on Amiga 1200 and Amiga
600. If you intend to use pcmcia cards say Y; otherwise say N.
config STRAM_PROC
bool "ST-RAM statistics in /proc"
depends on ATARI
help
Say Y here to report ST-RAM usage statistics in /proc/stram.
config HEARTBEAT
bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
help
Use the power-on LED on your machine as a load meter. The exact
behavior is platform-dependent, but normally the flash frequency is
a hyperbolic function of the 5-minute load average.
# We have a dedicated heartbeat LED. :-)
config PROC_HARDWARE
bool "/proc/hardware support"
help
Say Y here to support the /proc/hardware file, which gives you
access to information about the machine you're running on,
including the model, CPU, MMU, clock speed, BogoMIPS rating,
and memory size.
config ISA
bool
depends on Q40 || AMIGA_PCMCIA
default y
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. Other bus systems are PCI, EISA, MicroChannel
(MCA) or VESA. ISA is an older system, now being displaced by PCI;
newer boards don't support it. If you have ISA, say Y, otherwise N.
config GENERIC_ISA_DMA
bool
depends on Q40 || AMIGA_PCMCIA
default y
config ZONE_DMA
bool
default y
source "drivers/pci/Kconfig" if !MMU
menu "Power management options"
source "drivers/zorro/Kconfig" config PM
bool "Power Management support"
help
Support processor power management modes
endmenu endmenu
endif
source "net/Kconfig" source "net/Kconfig"
source "drivers/Kconfig" source "drivers/Kconfig"
if MMU
menu "Character devices" menu "Character devices"
config ATARI_MFPSER config ATARI_MFPSER
...@@ -627,6 +235,8 @@ config SERIAL_CONSOLE ...@@ -627,6 +235,8 @@ config SERIAL_CONSOLE
endmenu endmenu
endif
source "fs/Kconfig" source "fs/Kconfig"
source "arch/m68k/Kconfig.debug" source "arch/m68k/Kconfig.debug"
...
@@ -2,4 +2,38 @@ menu "Kernel hacking"
source "lib/Kconfig.debug"
if !MMU
config FULLDEBUG
bool "Full Symbolic/Source Debugging support"
help
Enable debugging symbols on kernel build.
config HIGHPROFILE
bool "Use fast second timer for profiling"
depends on COLDFIRE
help
Use a fast secondary clock to produce profiling information.
config BOOTPARAM
bool 'Compiled-in Kernel Boot Parameter'
config BOOTPARAM_STRING
string 'Kernel Boot Parameter'
default 'console=ttyS0,19200'
depends on BOOTPARAM
config NO_KERNEL_MSG
bool "Suppress Kernel BUG Messages"
help
Do not output any debug BUG messages within the kernel.
config BDM_DISABLE
bool "Disable BDM signals"
depends on (EXPERIMENTAL && COLDFIRE)
help
Disable the ColdFire CPU's BDM signals.
endif
endmenu
config GENERIC_IOMAP
bool
default y
config ARCH_MAY_HAVE_PC_FDC
bool
depends on BROKEN && (Q40 || SUN3X)
default y
config ARCH_USES_GETTIMEOFFSET
def_bool y
config EISA
bool
---help---
The Extended Industry Standard Architecture (EISA) bus was
developed as an open alternative to the IBM MicroChannel bus.
The EISA bus provided some of the features of the IBM MicroChannel
bus while maintaining backward compatibility with cards made for
the older ISA bus. The EISA bus saw limited use between 1988 and
1995 when it was made obsolete by the PCI bus.
Say Y here if you are building a kernel for an EISA-based machine.
Otherwise, say N.
config MCA
bool
help
MicroChannel Architecture is found in some IBM PS/2 machines and
laptops. It is a bus system similar to PCI or ISA. See
<file:Documentation/mca.txt> (and especially the web page given
there) before attempting to build an MCA bus kernel.
config PCMCIA
tristate
---help---
Say Y here if you want to attach PCMCIA- or PC-cards to your Linux
computer. These are credit-card size devices such as network cards,
modems or hard drives often used with laptops computers. There are
actually two varieties of these cards: the older 16 bit PCMCIA cards
and the newer 32 bit CardBus cards. If you want to use CardBus
cards, you need to say Y here and also to "CardBus support" below.
To use your PC-cards, you will need supporting software from David
Hinds' pcmcia-cs package (see the file <file:Documentation/Changes>
for location). Please also read the PCMCIA-HOWTO, available from
<http://www.tldp.org/docs.html#howto>.
To compile this driver as modules, choose M here: the
modules will be called pcmcia_core and ds.
config AMIGA
bool "Amiga support"
select MMU_MOTOROLA if MMU
help
This option enables support for the Amiga series of computers. If
you plan to use this kernel on an Amiga, say Y here and browse the
material available in <file:Documentation/m68k>; otherwise say N.
config ATARI
bool "Atari support"
select MMU_MOTOROLA if MMU
help
This option enables support for the 68000-based Atari series of
computers (including the TT, Falcon and Medusa). If you plan to use
this kernel on an Atari, say Y here and browse the material
available in <file:Documentation/m68k>; otherwise say N.
config MAC
bool "Macintosh support"
select MMU_MOTOROLA if MMU
help
This option enables support for the Apple Macintosh series of
computers (yes, there is experimental support now, at least for part
of the series).
Say N unless you're willing to code the remaining necessary support.
;)
config NUBUS
bool
depends on MAC
default y
config M68K_L2_CACHE
bool
depends on MAC
default y
config APOLLO
bool "Apollo support"
select MMU_MOTOROLA if MMU
help
Say Y here if you want to run Linux on an MC680x0-based Apollo
Domain workstation such as the DN3500.
config VME
bool "VME (Motorola and BVM) support"
select MMU_MOTOROLA if MMU
help
Say Y here if you want to build a kernel for a 680x0 based VME
board. Boards currently supported include Motorola boards MVME147,
MVME162, MVME166, MVME167, MVME172, and MVME177. BVME4000 and
BVME6000 boards from BVM Ltd are also supported.
config MVME147
bool "MVME147 support"
depends on VME
help
Say Y to include support for early Motorola VME boards. This will
build a kernel which can run on MVME147 single-board computers. If
you select this option you will have to select the appropriate
drivers for SCSI, Ethernet and serial ports later on.
config MVME16x
bool "MVME162, 166 and 167 support"
depends on VME
help
Say Y to include support for Motorola VME boards. This will build a
kernel which can run on MVME162, MVME166, MVME167, MVME172, and
MVME177 boards. If you select this option you will have to select
the appropriate drivers for SCSI, Ethernet and serial ports later
on.
config BVME6000
bool "BVME4000 and BVME6000 support"
depends on VME
help
Say Y to include support for VME boards from BVM Ltd. This will
build a kernel which can run on BVME4000 and BVME6000 boards. If
you select this option you will have to select the appropriate
drivers for SCSI, Ethernet and serial ports later on.
config HP300
bool "HP9000/300 and HP9000/400 support"
select MMU_MOTOROLA if MMU
help
This option enables support for the HP9000/300 and HP9000/400 series
of workstations. Support for these machines is still somewhat
experimental. If you plan to try to use the kernel on such a machine
say Y here.
Everybody else says N.
config DIO
bool "DIO bus support"
depends on HP300
default y
help
Say Y here to enable support for the "DIO" expansion bus used in
HP300 machines. If you are using such a system you almost certainly
want this.
config SUN3X
bool "Sun3x support"
select MMU_MOTOROLA if MMU
select M68030
help
This option enables support for the Sun 3x series of workstations.
Be warned that this support is very experimental.
Note that Sun 3x kernels are not compatible with Sun 3 hardware.
General Linux information on the Sun 3x series (now discontinued)
is at <http://www.angelfire.com/ca2/tech68k/sun3.html>.
If you don't want to compile a kernel for a Sun 3x, say N.
config Q40
bool "Q40/Q60 support"
select MMU_MOTOROLA if MMU
help
The Q40 is a Motorola 68040-based successor to the Sinclair QL
manufactured in Germany. There is an official Q40 home page at
<http://www.q40.de/>. This option enables support for the Q40 and
Q60. Select your CPU below. For 68LC060 don't forget to enable FPU
emulation.
config SUN3
bool "Sun3 support"
depends on !MMU_MOTOROLA
select MMU_SUN3 if MMU
select M68020
help
This option enables support for the Sun 3 series of workstations
(3/50, 3/60, 3/1xx, 3/2xx systems). Enabling this option requires
that all other hardware types must be disabled, as Sun 3 kernels
are incompatible with all other m68k targets (including Sun 3x!).
If you don't want to compile a kernel exclusively for a Sun 3, say N.
config NATFEAT
bool "ARAnyM emulator support"
depends on ATARI
help
This option enables support for ARAnyM native features, such as
access to a disk image as /dev/hda.
config NFBLOCK
tristate "NatFeat block device support"
depends on BLOCK && NATFEAT
help
Say Y to include support for the ARAnyM NatFeat block device
which allows direct access to the hard drives without using
the hardware emulation.
config NFCON
tristate "NatFeat console driver"
depends on NATFEAT
help
Say Y to include support for the ARAnyM NatFeat console driver
which allows the console output to be redirected to the stderr
output of ARAnyM.
config NFETH
tristate "NatFeat Ethernet support"
depends on NET_ETHERNET && NATFEAT
help
Say Y to include support for the ARAnyM NatFeat network device
which will emulate a regular ethernet device while presenting an
ethertap device to the host system.
comment "Processor type"
config M68020
bool "68020 support"
help
If you anticipate running this kernel on a computer with a MC68020
processor, say Y. Otherwise, say N. Note that the 68020 requires a
68851 MMU (Memory Management Unit) to run Linux/m68k, except on the
Sun 3, which provides its own version.
config M68030
bool "68030 support"
depends on !MMU_SUN3
help
If you anticipate running this kernel on a computer with a MC68030
processor, say Y. Otherwise, say N. Note that a MC68EC030 will not
work, as it does not include an MMU (Memory Management Unit).
config M68040
bool "68040 support"
depends on !MMU_SUN3
help
If you anticipate running this kernel on a computer with a MC68LC040
or MC68040 processor, say Y. Otherwise, say N. Note that an
MC68EC040 will not work, as it does not include an MMU (Memory
Management Unit).
config M68060
bool "68060 support"
depends on !MMU_SUN3
help
If you anticipate running this kernel on a computer with a MC68060
processor, say Y. Otherwise, say N.
config MMU_MOTOROLA
bool
config MMU_SUN3
bool
depends on MMU && !MMU_MOTOROLA
config M68KFPU_EMU
bool "Math emulation support (EXPERIMENTAL)"
depends on EXPERIMENTAL
help
At some point in the future, this will cause floating-point math
instructions to be emulated by the kernel on machines that lack a
floating-point math coprocessor. Thrill-seekers and chronically
sleep-deprived psychotic hacker types can say Y now, everyone else
should probably wait a while.
config M68KFPU_EMU_EXTRAPREC
bool "Math emulation extra precision"
depends on M68KFPU_EMU
help
The fpu uses normally a few bit more during calculations for
correct rounding, the emulator can (often) do the same but this
extra calculation can cost quite some time, so you can disable
it here. The emulator will then "only" calculate with a 64 bit
mantissa and round slightly incorrect, what is more than enough
for normal usage.
config M68KFPU_EMU_ONLY
bool "Math emulation only kernel"
depends on M68KFPU_EMU
help
This option prevents any floating-point instructions from being
compiled into the kernel, thereby the kernel doesn't save any
floating point context anymore during task switches, so this
kernel will only be usable on machines without a floating-point
math coprocessor. This makes the kernel a bit faster as no tests
needs to be executed whether a floating-point instruction in the
kernel should be executed or not.
config ADVANCED
bool "Advanced configuration options"
---help---
This gives you access to some advanced options for the CPU. The
defaults should be fine for most users, but these options may make
it possible for you to improve performance somewhat if you know what
you are doing.
Note that the answer to this question won't directly affect the
kernel: saying N will just cause the configurator to skip all
the questions about these options.
Most users should say N to this question.
config RMW_INSNS
bool "Use read-modify-write instructions"
depends on ADVANCED
---help---
This allows to use certain instructions that work with indivisible
read-modify-write bus cycles. While this is faster than the
workaround of disabling interrupts, it can conflict with DMA
( = direct memory access) on many Amiga systems, and it is also said
to destabilize other machines. It is very likely that this will
cause serious problems on any Amiga or Atari Medusa if set. The only
configuration where it should work are 68030-based Ataris, where it
apparently improves performance. But you've been warned! Unless you
really know what you are doing, say N. Try Y only if you're quite
adventurous.
config SINGLE_MEMORY_CHUNK
bool "Use one physical chunk of memory only" if ADVANCED && !SUN3
default y if SUN3
select NEED_MULTIPLE_NODES
help
Ignore all but the first contiguous chunk of physical memory for VM
purposes. This will save a few bytes kernel size and may speed up
some operations. Say N if not sure.
config 060_WRITETHROUGH
bool "Use write-through caching for 68060 supervisor accesses"
depends on ADVANCED && M68060
---help---
The 68060 generally uses copyback caching of recently accessed data.
Copyback caching means that memory writes will be held in an on-chip
cache and only written back to memory some time later. Saying Y
here will force supervisor (kernel) accesses to use writethrough
caching. Writethrough caching means that data is written to memory
straight away, so that cache and memory data always agree.
Writethrough caching is less efficient, but is needed for some
drivers on 68060 based systems where the 68060 bus snooping signal
is hardwired on. The 53c710 SCSI driver is known to suffer from
this problem.
config ARCH_DISCONTIGMEM_ENABLE
def_bool !SINGLE_MEMORY_CHUNK
config NODES_SHIFT
int
default "3"
depends on !SINGLE_MEMORY_CHUNK
config ZORRO
bool "Amiga Zorro (AutoConfig) bus support"
depends on AMIGA
help
This enables support for the Zorro bus in the Amiga. If you have
expansion cards in your Amiga that conform to the Amiga
AutoConfig(tm) specification, say Y, otherwise N. Note that even
expansion cards that do not fit in the Zorro slots but fit in e.g.
the CPU slot may fall in this category, so you have to say Y to let
Linux use these.
config AMIGA_PCMCIA
bool "Amiga 1200/600 PCMCIA support (EXPERIMENTAL)"
depends on AMIGA && EXPERIMENTAL
help
Include support in the kernel for pcmcia on Amiga 1200 and Amiga
600. If you intend to use pcmcia cards say Y; otherwise say N.
config STRAM_PROC
bool "ST-RAM statistics in /proc"
depends on ATARI
help
Say Y here to report ST-RAM usage statistics in /proc/stram.
config HEARTBEAT
bool "Use power LED as a heartbeat" if AMIGA || APOLLO || ATARI || MAC ||Q40
default y if !AMIGA && !APOLLO && !ATARI && !MAC && !Q40 && HP300
help
Use the power-on LED on your machine as a load meter. The exact
behavior is platform-dependent, but normally the flash frequency is
a hyperbolic function of the 5-minute load average.
# We have a dedicated heartbeat LED. :-)
config PROC_HARDWARE
bool "/proc/hardware support"
help
Say Y here to support the /proc/hardware file, which gives you
access to information about the machine you're running on,
including the model, CPU, MMU, clock speed, BogoMIPS rating,
and memory size.
config ISA
bool
depends on Q40 || AMIGA_PCMCIA
default y
help
Find out whether you have ISA slots on your motherboard. ISA is the
name of a bus system, i.e. the way the CPU talks to the other stuff
inside your box. Other bus systems are PCI, EISA, MicroChannel
(MCA) or VESA. ISA is an older system, now being displaced by PCI;
newer boards don't support it. If you have ISA, say Y, otherwise N.
config GENERIC_ISA_DMA
bool
depends on Q40 || AMIGA_PCMCIA
default y
source "drivers/pci/Kconfig"
source "drivers/zorro/Kconfig"
config M68K
bool
default y
select HAVE_IDE
select HAVE_GENERIC_HARDIRQS
select GENERIC_HARDIRQS_NO_DEPRECATED
config MMU
bool
default n
config NO_DMA
bool
depends on !COLDFIRE
default y
config FPU
	bool
	default n
config ZONE_DMA
bool
default y
config RWSEM_GENERIC_SPINLOCK
bool
default y
config RWSEM_XCHGADD_ALGORITHM
bool
default n
config ARCH_HAS_ILOG2_U32
bool
default n
config ARCH_HAS_ILOG2_U64
bool
default n
config GENERIC_FIND_NEXT_BIT
	bool
	default y
@@ -46,29 +10,14 @@ config GENERIC_GPIO
	bool
	default n
config GENERIC_HWEIGHT
bool
default y
config GENERIC_CALIBRATE_DELAY
bool
default y
config GENERIC_CMOS_UPDATE
	bool
	default y
config TIME_LOW_RES
bool
default y
config GENERIC_CLOCKEVENTS
	bool
	default n
config NO_IOPORT
def_bool y
config COLDFIRE_SW_A7
	bool
	default n
@@ -85,12 +34,6 @@ config HAVE_MBAR
config HAVE_IPSBAR
	bool
source "init/Kconfig"
source "kernel/Kconfig.freezer"
menu "Processor type and features"
choice
	prompt "CPU"
	default M68EZ328
@@ -630,11 +573,6 @@ config 4KSTACKS
	  running more threads on a system and also reduces the pressure
	  on the VM subsystem for higher order allocations.
config HZ
int
default 1000 if CLEOPATRA
default 100
comment "RAM configuration" comment "RAM configuration"
config RAMBASE config RAMBASE
...@@ -803,10 +741,6 @@ endif ...@@ -803,10 +741,6 @@ endif
source "kernel/time/Kconfig" source "kernel/time/Kconfig"
source "mm/Kconfig"
endmenu
config ISA_DMA_API
	bool
	depends on !M5272
@@ -814,31 +748,3 @@ config ISA_DMA_API
source "drivers/pcmcia/Kconfig"
menu "Executable file formats"
source "fs/Kconfig.binfmt"
endmenu
menu "Power management options"
config PM
bool "Power Management support"
help
Support processor power management modes
endmenu
source "net/Kconfig"
source "drivers/Kconfig"
source "fs/Kconfig"
source "arch/m68knommu/Kconfig.debug"
source "security/Kconfig"
source "crypto/Kconfig"
source "lib/Kconfig"
#
# m68k/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies. Remember to do have actions
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1994 by Hamish Macdonald
#
KBUILD_DEFCONFIG := multi_defconfig

-# override top level makefile
-AS += -m68020
+ifdef CONFIG_MMU
+include $(srctree)/arch/m68k/Makefile_mm
LDFLAGS := -m m68kelf
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
ifneq ($(SUBARCH),$(ARCH))
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, \
m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
endif
endif
ifdef CONFIG_SUN3
LDFLAGS_vmlinux = -N
endif
CHECKFLAGS += -D__mc68000__
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
KBUILD_CFLAGS += -pipe -fno-strength-reduce -ffixed-a2
# enable processor switch if compiled only for a single cpu
ifndef CONFIG_M68020
ifndef CONFIG_M68030
ifndef CONFIG_M68060
KBUILD_CFLAGS += -m68040
endif
ifndef CONFIG_M68040
KBUILD_CFLAGS += -m68060
endif
endif
endif
ifdef CONFIG_KGDB
# If configured for kgdb support, include debugging infos and keep the
# frame pointer
KBUILD_CFLAGS := $(subst -fomit-frame-pointer,,$(KBUILD_CFLAGS)) -g
endif
ifndef CONFIG_SUN3
head-y := arch/m68k/kernel/head.o
else
-head-y := arch/m68k/kernel/sun3-head.o
+include $(srctree)/arch/m68k/Makefile_no
endif
core-y += arch/m68k/kernel/ arch/m68k/mm/
libs-y += arch/m68k/lib/
core-$(CONFIG_Q40) += arch/m68k/q40/
core-$(CONFIG_AMIGA) += arch/m68k/amiga/
core-$(CONFIG_ATARI) += arch/m68k/atari/
core-$(CONFIG_MAC) += arch/m68k/mac/
core-$(CONFIG_HP300) += arch/m68k/hp300/
core-$(CONFIG_APOLLO) += arch/m68k/apollo/
core-$(CONFIG_MVME147) += arch/m68k/mvme147/
core-$(CONFIG_MVME16x) += arch/m68k/mvme16x/
core-$(CONFIG_BVME6000) += arch/m68k/bvme6000/
core-$(CONFIG_SUN3X) += arch/m68k/sun3x/ arch/m68k/sun3/
core-$(CONFIG_SUN3) += arch/m68k/sun3/ arch/m68k/sun3/prom/
core-$(CONFIG_NATFEAT) += arch/m68k/emu/
core-$(CONFIG_M68040) += arch/m68k/fpsp040/
core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
all: zImage
lilo: vmlinux
if [ -f $(INSTALL_PATH)/vmlinux ]; then mv -f $(INSTALL_PATH)/vmlinux $(INSTALL_PATH)/vmlinux.old; fi
if [ -f $(INSTALL_PATH)/System.map ]; then mv -f $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
cat vmlinux > $(INSTALL_PATH)/vmlinux
cp System.map $(INSTALL_PATH)/System.map
if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
zImage compressed: vmlinux.gz
vmlinux.gz: vmlinux
ifndef CONFIG_KGDB
cp vmlinux vmlinux.tmp
$(STRIP) vmlinux.tmp
gzip -9c vmlinux.tmp >vmlinux.gz
rm vmlinux.tmp
else
gzip -9c vmlinux >vmlinux.gz
endif
bzImage: vmlinux.bz2
vmlinux.bz2: vmlinux
ifndef CONFIG_KGDB
cp vmlinux vmlinux.tmp
$(STRIP) vmlinux.tmp
bzip2 -1c vmlinux.tmp >vmlinux.bz2
rm vmlinux.tmp
else
bzip2 -1c vmlinux >vmlinux.bz2
endif
archclean:
rm -f vmlinux.gz vmlinux.bz2
install:
sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
#
# m68k/Makefile
#
# This file is included by the global makefile so that you can add your own
# architecture-specific flags and dependencies. Remember to do have actions
# for "archclean" and "archdep" for cleaning up and making dependencies for
# this architecture
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
# for more details.
#
# Copyright (C) 1994 by Hamish Macdonald
#
# override top level makefile
AS += -m68020
LDFLAGS := -m m68kelf
KBUILD_LDFLAGS_MODULE += -T $(srctree)/arch/m68k/kernel/module.lds
ifneq ($(SUBARCH),$(ARCH))
ifeq ($(CROSS_COMPILE),)
CROSS_COMPILE := $(call cc-cross-prefix, \
m68k-linux-gnu- m68k-linux- m68k-unknown-linux-gnu-)
endif
endif
ifdef CONFIG_SUN3
LDFLAGS_vmlinux = -N
endif
CHECKFLAGS += -D__mc68000__
# without -fno-strength-reduce the 53c7xx.c driver fails ;-(
KBUILD_CFLAGS += -pipe -fno-strength-reduce -ffixed-a2
# enable processor switch if compiled only for a single cpu
ifndef CONFIG_M68020
ifndef CONFIG_M68030
ifndef CONFIG_M68060
KBUILD_CFLAGS += -m68040
endif
ifndef CONFIG_M68040
KBUILD_CFLAGS += -m68060
endif
endif
endif
ifdef CONFIG_KGDB
# If configured for kgdb support, include debugging infos and keep the
# frame pointer
KBUILD_CFLAGS := $(subst -fomit-frame-pointer,,$(KBUILD_CFLAGS)) -g
endif
ifndef CONFIG_SUN3
head-y := arch/m68k/kernel/head.o
else
head-y := arch/m68k/kernel/sun3-head.o
endif
core-y += arch/m68k/kernel/ arch/m68k/mm/
libs-y += arch/m68k/lib/
core-$(CONFIG_Q40) += arch/m68k/q40/
core-$(CONFIG_AMIGA) += arch/m68k/amiga/
core-$(CONFIG_ATARI) += arch/m68k/atari/
core-$(CONFIG_MAC) += arch/m68k/mac/
core-$(CONFIG_HP300) += arch/m68k/hp300/
core-$(CONFIG_APOLLO) += arch/m68k/apollo/
core-$(CONFIG_MVME147) += arch/m68k/mvme147/
core-$(CONFIG_MVME16x) += arch/m68k/mvme16x/
core-$(CONFIG_BVME6000) += arch/m68k/bvme6000/
core-$(CONFIG_SUN3X) += arch/m68k/sun3x/ arch/m68k/sun3/
core-$(CONFIG_SUN3) += arch/m68k/sun3/ arch/m68k/sun3/prom/
core-$(CONFIG_NATFEAT) += arch/m68k/emu/
core-$(CONFIG_M68040) += arch/m68k/fpsp040/
core-$(CONFIG_M68060) += arch/m68k/ifpsp060/
core-$(CONFIG_M68KFPU_EMU) += arch/m68k/math-emu/
all: zImage
lilo: vmlinux
if [ -f $(INSTALL_PATH)/vmlinux ]; then mv -f $(INSTALL_PATH)/vmlinux $(INSTALL_PATH)/vmlinux.old; fi
if [ -f $(INSTALL_PATH)/System.map ]; then mv -f $(INSTALL_PATH)/System.map $(INSTALL_PATH)/System.old; fi
cat vmlinux > $(INSTALL_PATH)/vmlinux
cp System.map $(INSTALL_PATH)/System.map
if [ -x /sbin/lilo ]; then /sbin/lilo; else /etc/lilo/install; fi
zImage compressed: vmlinux.gz
vmlinux.gz: vmlinux
ifndef CONFIG_KGDB
cp vmlinux vmlinux.tmp
$(STRIP) vmlinux.tmp
gzip -9c vmlinux.tmp >vmlinux.gz
rm vmlinux.tmp
else
gzip -9c vmlinux >vmlinux.gz
endif
bzImage: vmlinux.bz2
vmlinux.bz2: vmlinux
ifndef CONFIG_KGDB
cp vmlinux vmlinux.tmp
$(STRIP) vmlinux.tmp
bzip2 -1c vmlinux.tmp >vmlinux.bz2
rm vmlinux.tmp
else
bzip2 -1c vmlinux >vmlinux.bz2
endif
archclean:
rm -f vmlinux.gz vmlinux.bz2
install:
sh $(srctree)/arch/m68k/install.sh $(KERNELRELEASE) vmlinux.gz System.map "$(INSTALL_PATH)"
#
-# arch/m68knommu/Makefile
+# arch/m68k/Makefile
#
# This file is subject to the terms and conditions of the GNU General Public
# License. See the file "COPYING" in the main directory of this archive
@@ -8,8 +8,6 @@
# (C) Copyright 2002, Greg Ungerer <gerg@snapgear.com>
#
KBUILD_DEFCONFIG := m5208evb_defconfig
platform-$(CONFIG_M68328)	:= 68328
platform-$(CONFIG_M68EZ328)	:= 68EZ328
platform-$(CONFIG_M68VZ328)	:= 68VZ328
@@ -82,7 +80,7 @@ cpuclass-$(CONFIG_M68360) := 68360
CPUCLASS := $(cpuclass-y)

ifneq ($(CPUCLASS),$(PLATFORM))
-CLASSDIR := arch/m68knommu/platform/$(cpuclass-y)/
+CLASSDIR := arch/m68k/platform/$(cpuclass-y)/
endif

export PLATFORM BOARD MODEL CPUCLASS
@@ -114,13 +112,13 @@ KBUILD_CFLAGS += $(cflags-y)
KBUILD_CFLAGS += -D__linux__
KBUILD_CFLAGS += -DUTS_SYSNAME=\"uClinux\"

-head-y := arch/m68knommu/platform/$(cpuclass-y)/head.o
+head-y := arch/m68k/platform/$(cpuclass-y)/head.o

-core-y	+= arch/m68knommu/kernel/ \
-	   arch/m68knommu/mm/ \
-	   $(CLASSDIR) \
-	   arch/m68knommu/platform/$(PLATFORM)/
-libs-y	+= arch/m68knommu/lib/
+core-y	+= arch/m68k/kernel/ \
+	   arch/m68k/mm/ \
+	   $(CLASSDIR) \
+	   arch/m68k/platform/$(PLATFORM)/
+libs-y	+= arch/m68k/lib/

archclean:
+# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -37,6 +38,7 @@ CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
...
+# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -35,6 +36,7 @@ CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
...
+# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -33,6 +34,7 @@ CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
...
+# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -36,6 +37,7 @@ CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
...
+# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -35,6 +36,7 @@ CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
...
+# CONFIG_MMU is not set
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
@@ -35,6 +36,7 @@ CONFIG_INET=y
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
+# CONFIG_FW_LOADER is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
...
+ifdef CONFIG_MMU
+include arch/m68k/kernel/Makefile_mm
+else
+include arch/m68k/kernel/Makefile_no
+endif

#
# Makefile for the linux kernel.
#

ifndef CONFIG_SUN3
extra-y := head.o
else
extra-y := sun3-head.o
endif
extra-y += vmlinux.lds
obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
sys_m68k.o time.o setup.o m68k_ksyms.o devres.o
devres-y = ../../../kernel/irq/devres.o
obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo
#
# Makefile for the linux kernel.
#
ifndef CONFIG_SUN3
extra-y := head.o
else
extra-y := sun3-head.o
endif
extra-y += vmlinux.lds
obj-y := entry.o process.o traps.o ints.o signal.o ptrace.o module.o \
sys_m68k.o time.o setup.o m68k_ksyms.o devres.o
devres-y = ../../../kernel/irq/devres.o
obj-y$(CONFIG_MMU_SUN3) += dma.o # no, it's not a typo
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#define ASM_OFFSETS_C
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/kbuild.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
#include <linux/font.h>
int main(void)
{
/* offsets into the task struct */
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
#ifdef CONFIG_MMU
-	DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
+#include "asm-offsets_mm.c"
+#else
+#include "asm-offsets_no.c"
#endif
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
/* offsets into the thread_info struct */
DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
/* offsets into the pt_regs */
DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
/* bitfields are a bit difficult */
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
/* offsets into the bi_record struct */
DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
DEFINE(BIR_DATA, offsetof(struct bi_record, data));
/* offsets into font_desc (drivers/video/console/font.h) */
DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
/* signal defines */
DEFINE(LSIGSEGV, SIGSEGV);
DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
DEFINE(LSIGTRAP, SIGTRAP);
DEFINE(LTRAP_TRACE, TRAP_TRACE);
/* offsets into the custom struct */
DEFINE(CUSTOMBASE, &amiga_custom);
DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
DEFINE(CIAABASE, &ciaa);
DEFINE(CIABBASE, &ciab);
DEFINE(C_PRA, offsetof(struct CIA, pra));
DEFINE(ZTWOBASE, zTwoBase);
return 0;
}
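
The header comment of asm-offsets.c above describes the trick this file relies on: compile a C file to assembler and scrape the constant values back out, so that assembly sources such as entry.S can use structure offsets like PT_OFF_D0 without hard-coding them; the kernel build drives the same idea automatically to produce an asm-offsets.h of plain #defines. Below is a minimal, self-contained sketch of that technique, not the kernel's actual kbuild machinery; the struct, the DEFINE macro and the DEMO_* symbol names here are invented for illustration.

/*
 * Standalone sketch of the "compile to assembler and extract #defines"
 * technique described in the comment above.  Build with "cc -S offsets.c"
 * and look for the "->" marker lines in the generated offsets.s; a small
 * sed script can turn each one into "#define DEMO_THREAD_KSP 0" and
 * friends for use from assembly code.  Names are hypothetical.
 */
#include <stddef.h>

struct demo_thread {			/* stand-in for struct thread_struct */
	unsigned long ksp;
	unsigned long usp;
	unsigned short sr;
};

/* Emit "->NAME <constant> <expression>" into the assembler output. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

int main(void)
{
	DEFINE(DEMO_THREAD_KSP, offsetof(struct demo_thread, ksp));
	DEFINE(DEMO_THREAD_USP, offsetof(struct demo_thread, usp));
	DEFINE(DEMO_THREAD_SR,  offsetof(struct demo_thread, sr));
	return 0;
}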
/*
* This program is used to generate definitions needed by
* assembly language modules.
*
* We use the technique used in the OSF Mach kernel code:
* generate asm statements containing #defines,
* compile this file to assembler, and then extract the
* #defines from the assembly-language output.
*/
#define ASM_OFFSETS_C
#include <linux/stddef.h>
#include <linux/sched.h>
#include <linux/kernel_stat.h>
#include <linux/kbuild.h>
#include <asm/bootinfo.h>
#include <asm/irq.h>
#include <asm/amigahw.h>
#include <linux/font.h>
int main(void)
{
/* offsets into the task struct */
DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
DEFINE(TASK_INFO, offsetof(struct task_struct, thread.info));
DEFINE(TASK_MM, offsetof(struct task_struct, mm));
#ifdef CONFIG_MMU
DEFINE(TASK_TINFO, offsetof(struct task_struct, thread.info));
#endif
/* offsets into the thread struct */
DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
DEFINE(THREAD_USP, offsetof(struct thread_struct, usp));
DEFINE(THREAD_SR, offsetof(struct thread_struct, sr));
DEFINE(THREAD_FS, offsetof(struct thread_struct, fs));
DEFINE(THREAD_CRP, offsetof(struct thread_struct, crp));
DEFINE(THREAD_ESP0, offsetof(struct thread_struct, esp0));
DEFINE(THREAD_FPREG, offsetof(struct thread_struct, fp));
DEFINE(THREAD_FPCNTL, offsetof(struct thread_struct, fpcntl));
DEFINE(THREAD_FPSTATE, offsetof(struct thread_struct, fpstate));
/* offsets into the thread_info struct */
DEFINE(TINFO_PREEMPT, offsetof(struct thread_info, preempt_count));
DEFINE(TINFO_FLAGS, offsetof(struct thread_info, flags));
/* offsets into the pt_regs */
DEFINE(PT_OFF_D0, offsetof(struct pt_regs, d0));
DEFINE(PT_OFF_ORIG_D0, offsetof(struct pt_regs, orig_d0));
DEFINE(PT_OFF_D1, offsetof(struct pt_regs, d1));
DEFINE(PT_OFF_D2, offsetof(struct pt_regs, d2));
DEFINE(PT_OFF_D3, offsetof(struct pt_regs, d3));
DEFINE(PT_OFF_D4, offsetof(struct pt_regs, d4));
DEFINE(PT_OFF_D5, offsetof(struct pt_regs, d5));
DEFINE(PT_OFF_A0, offsetof(struct pt_regs, a0));
DEFINE(PT_OFF_A1, offsetof(struct pt_regs, a1));
DEFINE(PT_OFF_A2, offsetof(struct pt_regs, a2));
DEFINE(PT_OFF_PC, offsetof(struct pt_regs, pc));
DEFINE(PT_OFF_SR, offsetof(struct pt_regs, sr));
/* bitfields are a bit difficult */
DEFINE(PT_OFF_FORMATVEC, offsetof(struct pt_regs, pc) + 4);
/* offsets into the irq_cpustat_t struct */
DEFINE(CPUSTAT_SOFTIRQ_PENDING, offsetof(irq_cpustat_t, __softirq_pending));
/* offsets into the bi_record struct */
DEFINE(BIR_TAG, offsetof(struct bi_record, tag));
DEFINE(BIR_SIZE, offsetof(struct bi_record, size));
DEFINE(BIR_DATA, offsetof(struct bi_record, data));
/* offsets into font_desc (drivers/video/console/font.h) */
DEFINE(FONT_DESC_IDX, offsetof(struct font_desc, idx));
DEFINE(FONT_DESC_NAME, offsetof(struct font_desc, name));
DEFINE(FONT_DESC_WIDTH, offsetof(struct font_desc, width));
DEFINE(FONT_DESC_HEIGHT, offsetof(struct font_desc, height));
DEFINE(FONT_DESC_DATA, offsetof(struct font_desc, data));
DEFINE(FONT_DESC_PREF, offsetof(struct font_desc, pref));
/* signal defines */
DEFINE(LSIGSEGV, SIGSEGV);
DEFINE(LSEGV_MAPERR, SEGV_MAPERR);
DEFINE(LSIGTRAP, SIGTRAP);
DEFINE(LTRAP_TRACE, TRAP_TRACE);
/* offsets into the custom struct */
DEFINE(CUSTOMBASE, &amiga_custom);
DEFINE(C_INTENAR, offsetof(struct CUSTOM, intenar));
DEFINE(C_INTREQR, offsetof(struct CUSTOM, intreqr));
DEFINE(C_INTENA, offsetof(struct CUSTOM, intena));
DEFINE(C_INTREQ, offsetof(struct CUSTOM, intreq));
DEFINE(C_SERDATR, offsetof(struct CUSTOM, serdatr));
DEFINE(C_SERDAT, offsetof(struct CUSTOM, serdat));
DEFINE(C_SERPER, offsetof(struct CUSTOM, serper));
DEFINE(CIAABASE, &ciaa);
DEFINE(CIABBASE, &ciab);
DEFINE(C_PRA, offsetof(struct CIA, pra));
DEFINE(ZTWOBASE, zTwoBase);
return 0;
}
+#ifdef CONFIG_MMU
+#include "dma_mm.c"
+#else
+#include "dma_no.c"
+#endif

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */
#undef DEBUG
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t flag)
{
struct page *page, **map;
pgprot_t pgprot;
void *addr;
int i, order;
pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
size = PAGE_ALIGN(size);
order = get_order(size);
page = alloc_pages(flag, order);
if (!page)
return NULL;
*handle = page_to_phys(page);
map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
if (!map) {
__free_pages(page, order);
return NULL;
}
split_page(page, order);
order = 1 << order;
size >>= PAGE_SHIFT;
map[0] = page;
for (i = 1; i < size; i++)
map[i] = page + i;
for (; i < order; i++)
__free_page(page + i);
pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
if (CPU_IS_040_OR_060)
pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
else
pgprot_val(pgprot) |= _PAGE_NOCACHE030;
addr = vmap(map, size, VM_MAP, pgprot);
kfree(map);
return addr;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
void *addr, dma_addr_t handle)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
vfree(addr);
}
EXPORT_SYMBOL(dma_free_coherent);
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
switch (dir) {
case DMA_TO_DEVICE:
cache_push(handle, size);
break;
case DMA_FROM_DEVICE:
cache_clear(handle, size);
break;
default:
if (printk_ratelimit())
printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
break;
}
}
EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nents; sg++, i++)
dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
enum dma_data_direction dir)
{
dma_addr_t handle = virt_to_bus(addr);
dma_sync_single_for_device(dev, handle, size, dir);
return handle;
}
EXPORT_SYMBOL(dma_map_single);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
dma_addr_t handle = page_to_phys(page) + offset;
dma_sync_single_for_device(dev, handle, size, dir);
return handle;
}
EXPORT_SYMBOL(dma_map_page);
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nents; sg++, i++) {
sg->dma_address = sg_phys(sg);
dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
return nents;
}
EXPORT_SYMBOL(dma_map_sg);
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#undef DEBUG
#include <linux/dma-mapping.h>
#include <linux/device.h>
#include <linux/kernel.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
void *dma_alloc_coherent(struct device *dev, size_t size,
dma_addr_t *handle, gfp_t flag)
{
struct page *page, **map;
pgprot_t pgprot;
void *addr;
int i, order;
pr_debug("dma_alloc_coherent: %d,%x\n", size, flag);
size = PAGE_ALIGN(size);
order = get_order(size);
page = alloc_pages(flag, order);
if (!page)
return NULL;
*handle = page_to_phys(page);
map = kmalloc(sizeof(struct page *) << order, flag & ~__GFP_DMA);
if (!map) {
__free_pages(page, order);
return NULL;
}
split_page(page, order);
order = 1 << order;
size >>= PAGE_SHIFT;
map[0] = page;
for (i = 1; i < size; i++)
map[i] = page + i;
for (; i < order; i++)
__free_page(page + i);
pgprot = __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
if (CPU_IS_040_OR_060)
pgprot_val(pgprot) |= _PAGE_GLOBAL040 | _PAGE_NOCACHE_S;
else
pgprot_val(pgprot) |= _PAGE_NOCACHE030;
addr = vmap(map, size, VM_MAP, pgprot);
kfree(map);
return addr;
}
EXPORT_SYMBOL(dma_alloc_coherent);
void dma_free_coherent(struct device *dev, size_t size,
void *addr, dma_addr_t handle)
{
pr_debug("dma_free_coherent: %p, %x\n", addr, handle);
vfree(addr);
}
EXPORT_SYMBOL(dma_free_coherent);
void dma_sync_single_for_device(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
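	/*
	 * DMA_TO_DEVICE: push (write back) dirty cache lines so the device
	 * sees current data; DMA_FROM_DEVICE: invalidate the lines so the
	 * CPU rereads what the device wrote.
	 */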
switch (dir) {
case DMA_TO_DEVICE:
cache_push(handle, size);
break;
case DMA_FROM_DEVICE:
cache_clear(handle, size);
break;
default:
if (printk_ratelimit())
printk("dma_sync_single_for_device: unsupported dir %u\n", dir);
break;
}
}
EXPORT_SYMBOL(dma_sync_single_for_device);
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nents; sg++, i++)
dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
dma_addr_t dma_map_single(struct device *dev, void *addr, size_t size,
enum dma_data_direction dir)
{
dma_addr_t handle = virt_to_bus(addr);
dma_sync_single_for_device(dev, handle, size, dir);
return handle;
}
EXPORT_SYMBOL(dma_map_single);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir)
{
dma_addr_t handle = page_to_phys(page) + offset;
dma_sync_single_for_device(dev, handle, size, dir);
return handle;
}
EXPORT_SYMBOL(dma_map_page);
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
enum dma_data_direction dir)
{
int i;
for (i = 0; i < nents; sg++, i++) {
sg->dma_address = sg_phys(sg);
dma_sync_single_for_device(dev, sg->dma_address, sg->length, dir);
}
return nents;
}
EXPORT_SYMBOL(dma_map_sg);
#ifdef CONFIG_MMU
#include "entry_mm.S"
#else
#include "entry_no.S"
#endif
/* -*- mode: asm -*-
 *
 * linux/arch/m68k/kernel/entry.S
 *
 * Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file README.legal in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*
*/
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
*/
/*
* 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
* all pointers that used to be 'current' are now entry
* number 0 in the 'current_set' list.
*
 * 6/05/00 RZ: added writeback completion after return from sighandler
* for 68040
*/
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup, user_irqhandler_fixup
.text
ENTRY(buserr)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl buserr_c
addql #4,%sp
jra .Lret_from_exception
ENTRY(trap)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
jra .Lret_from_exception
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
movel %d1,%sp@-
jsr schedule_tail
addql #4,%sp
jra .Lret_from_exception
do_trace_entry:
movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
movel %sp@(PT_OFF_ORIG_D0),%d0
cmpl #NR_syscalls,%d0
jcs syscall
badsys:
movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_syscall
do_trace_exit:
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
jra .Lret_from_exception
ENTRY(ret_from_signal)
tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
jge 1f
jbsr syscall_trace
1: RESTORE_SWITCH_STACK
addql #4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
subql #7,%d0 | bus error frame ?
jbne 1f
movel %sp,%sp@-
jbsr berr_040cleanup
addql #4,%sp
1:
#endif
jra .Lret_from_exception
ENTRY(system_call)
SAVE_ALL_SYS
GET_CURRENT(%d1)
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
| syscall trace?
tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
jmi do_trace_entry
cmpl #NR_syscalls,%d0
jcc badsys
syscall:
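	| memory-indirect call through sys_call_table (4-byte entries indexed by %d0)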
jbsr @(sys_call_table,%d0:l:4)@(0)
movel %d0,%sp@(PT_OFF_D0) | save the return value
ret_from_syscall:
|oriw #0x0700,%sr
movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
jne syscall_exit_work
1: RESTORE_ALL
syscall_exit_work:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1b | if so, skip resched, signals
lslw #1,%d0
jcs do_trace_exit
jmi do_delayed_trace
lslw #8,%d0
jmi do_signal_return
pea resume_userspace
jra schedule
ENTRY(ret_from_exception)
.Lret_from_exception:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1f | if so, skip resched, signals
| only allow interrupts when we are really the last one on the
| kernel stack, otherwise stack overflow can occur during
| heavy interrupt load
andw #ALLOWINT,%sr
resume_userspace:
moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
jne exit_work
1: RESTORE_ALL
exit_work:
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
lslb #1,%d0
jmi do_signal_return
pea resume_userspace
jra schedule
do_signal_return:
|andw #ALLOWINT,%sr
subql #4,%sp | dummy return address
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
bsrl do_signal
addql #4,%sp
RESTORE_SWITCH_STACK
addql #4,%sp
jbra resume_userspace
do_delayed_trace:
bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
pea 1 | send SIGTRAP
movel %curptr,%sp@-
pea LSIGTRAP
jbsr send_sig
addql #8,%sp
addql #4,%sp
jbra resume_userspace
/* This is the main interrupt handler for autovector interrupts */
ENTRY(auto_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
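	| entering hardirq context: bump the hardirq byte of preempt_count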
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
subw #VEC_SPUR,%d0
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
auto_irqhandler_fixup = . + 2
jsr __m68k_handle_int | process the IRQ
addql #8,%sp | pop parameters off stack
ret_from_interrupt:
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
2: RESTORE_ALL
ALIGN
ret_from_last_interrupt:
moveq #(~ALLOWINT>>8)&0xff,%d0
andb %sp@(PT_OFF_SR),%d0
jne 2b
/* check if we need to do software interrupts */
tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
jeq .Lret_from_exception
pea ret_from_exception
jra do_softirq
/* Handler for user defined interrupt vectors */
ENTRY(user_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
subw #VEC_USER,%d0
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
user_irqhandler_fixup = . + 2
jsr __m68k_handle_int | process the IRQ
addql #8,%sp | pop parameters off stack
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
RESTORE_ALL
/* Handler for uninitialized and spurious interrupts */
ENTRY(bad_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
movel %sp,%sp@-
jsr handle_badint
addql #4,%sp
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
RESTORE_ALL
ENTRY(sys_fork)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_fork
addql #4,%sp
RESTORE_SWITCH_STACK
rts
ENTRY(sys_clone)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_clone
addql #4,%sp
RESTORE_SWITCH_STACK
rts
ENTRY(sys_vfork)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_vfork
addql #4,%sp
RESTORE_SWITCH_STACK
rts
ENTRY(sys_sigreturn)
SAVE_SWITCH_STACK
jbsr do_sigreturn
RESTORE_SWITCH_STACK
rts
ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK
jbsr do_rt_sigreturn
RESTORE_SWITCH_STACK
rts
resume:
/*
* Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
* registers until their contents are no longer needed.
*/
/* save sr */
movew %sr,%a0@(TASK_THREAD+THREAD_SR)
/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
movec %sfc,%d0
movew %d0,%a0@(TASK_THREAD+THREAD_FS)
/* save usp */
/* it is better to use a movel here instead of a movew 8*) */
movec %usp,%d0
movel %d0,%a0@(TASK_THREAD+THREAD_USP)
/* save non-scratch registers on stack */
SAVE_SWITCH_STACK
/* save current kernel stack pointer */
movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
tstl m68k_fputype
jeq 3f
#endif
fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
btst #3,m68k_cputype+3
beqs 1f
#endif
/* The 060 FPU keeps status in bits 15-8 of the first longword */
tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
jeq 3f
#if !defined(CPU_M68060_ONLY)
jra 2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
jeq 3f
#endif
2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif /* CONFIG_M68KFPU_EMU_ONLY */
/* Return previous task in %d1 */
movel %curptr,%d1
/* switch to new task (a1 contains new task) */
movel %a1,%curptr
/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
tstl m68k_fputype
jeq 4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
btst #3,m68k_cputype+3
beqs 1f
#endif
/* The 060 FPU keeps status in bits 15-8 of the first longword */
tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
jeq 3f
#if !defined(CPU_M68060_ONLY)
jra 2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
jeq 3f
#endif
2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif /* CONFIG_M68KFPU_EMU_ONLY */
/* restore the kernel stack pointer */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp
/* restore non-scratch registers */
RESTORE_SWITCH_STACK
/* restore user stack pointer */
movel %a1@(TASK_THREAD+THREAD_USP),%a0
movel %a0,%usp
/* restore fs (sfc,%dfc) */
movew %a1@(TASK_THREAD+THREAD_FS),%a0
movec %a0,%sfc
movec %a0,%dfc
/* restore status register */
movew %a1@(TASK_THREAD+THREAD_SR),%sr
rts
.data
ALIGN
sys_call_table:
.long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_chown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_old_select
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long sys_old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm for i386 */
.long sys_socketcall
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_ni_syscall
.long sys_ni_syscall /* 110 */ /* iopl for i386 */
.long sys_vhangup
.long sys_ni_syscall /* obsolete idle() syscall */
.long sys_ni_syscall /* vm86old for i386 */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* modify_ldt for i386 */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130 - old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_getpagesize
.long sys_ni_syscall /* old sys_query_module */
.long sys_poll
.long sys_nfsservctl
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
	.long sys_lchown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* streams1 */
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_chown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_lchown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_getdents64 /* 220 */
.long sys_gettid
.long sys_tkill
.long sys_setxattr
.long sys_lsetxattr
.long sys_fsetxattr /* 225 */
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr
.long sys_llistxattr /* 230 */
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr
.long sys_futex /* 235 */
.long sys_sendfile64
.long sys_mincore
.long sys_madvise
.long sys_fcntl64
.long sys_readahead /* 240 */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents
.long sys_io_submit
.long sys_io_cancel /* 245 */
.long sys_fadvise64
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl /* 250 */
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime /* 255 */
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime /* 260 */
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill /* 265 */
.long sys_utimes
.long sys_fadvise64_64
.long sys_mbind
.long sys_get_mempolicy
.long sys_set_mempolicy /* 270 */
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive
.long sys_mq_notify /* 275 */
.long sys_mq_getsetattr
.long sys_waitid
.long sys_ni_syscall /* for sys_vserver */
.long sys_add_key
.long sys_request_key /* 280 */
.long sys_keyctl
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init
.long sys_inotify_add_watch /* 285 */
.long sys_inotify_rm_watch
.long sys_migrate_pages
.long sys_openat
.long sys_mkdirat
.long sys_mknodat /* 290 */
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat /* 295 */
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat /* 300 */
.long sys_ni_syscall /* Reserved for pselect6 */
.long sys_ni_syscall /* Reserved for ppoll */
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list /* 305 */
.long sys_splice
.long sys_sync_file_range
.long sys_tee
.long sys_vmsplice
.long sys_move_pages /* 310 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_kexec_load
.long sys_getcpu
.long sys_epoll_pwait /* 315 */
.long sys_utimensat
.long sys_signalfd
.long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate /* 320 */
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_signalfd4
.long sys_eventfd2
.long sys_epoll_create1 /* 325 */
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_get_thread_area
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
/* -*- mode: asm -*-
*
* linux/arch/m68k/kernel/entry.S
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file README.legal in the main directory of this archive
* for more details.
*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*
*/
/*
* entry.S contains the system-call and fault low-level handling routines.
* This also contains the timer-interrupt handler, as well as all interrupts
* and faults that can result in a task-switch.
*
* NOTE: This code handles signal-recognition, which happens every time
* after a timer-interrupt and after each system call.
*
*/
/*
* 12/03/96 Jes: Currently we only support m68k single-cpu systems, so
* all pointers that used to be 'current' are now entry
* number 0 in the 'current_set' list.
*
 * 6/05/00 RZ: added writeback completion after return from sighandler
* for 68040
*/
#include <linux/linkage.h>
#include <asm/entry.h>
#include <asm/errno.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/traps.h>
#include <asm/unistd.h>
#include <asm/asm-offsets.h>
.globl system_call, buserr, trap, resume
.globl sys_call_table
.globl sys_fork, sys_clone, sys_vfork
.globl ret_from_interrupt, bad_interrupt
.globl auto_irqhandler_fixup
.globl user_irqvec_fixup, user_irqhandler_fixup
.text
ENTRY(buserr)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl buserr_c
addql #4,%sp
jra .Lret_from_exception
ENTRY(trap)
SAVE_ALL_INT
GET_CURRENT(%d0)
movel %sp,%sp@- | stack frame pointer argument
bsrl trap_c
addql #4,%sp
jra .Lret_from_exception
| After a fork we jump here directly from resume,
| so that %d1 contains the previous task
| schedule_tail now used regardless of CONFIG_SMP
ENTRY(ret_from_fork)
movel %d1,%sp@-
jsr schedule_tail
addql #4,%sp
jra .Lret_from_exception
do_trace_entry:
movel #-ENOSYS,%sp@(PT_OFF_D0)| needed for strace
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
movel %sp@(PT_OFF_ORIG_D0),%d0
cmpl #NR_syscalls,%d0
jcs syscall
badsys:
movel #-ENOSYS,%sp@(PT_OFF_D0)
jra ret_from_syscall
do_trace_exit:
subql #4,%sp
SAVE_SWITCH_STACK
jbsr syscall_trace
RESTORE_SWITCH_STACK
addql #4,%sp
jra .Lret_from_exception
ENTRY(ret_from_signal)
tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
jge 1f
jbsr syscall_trace
1: RESTORE_SWITCH_STACK
addql #4,%sp
/* on 68040 complete pending writebacks if any */
#ifdef CONFIG_M68040
bfextu %sp@(PT_OFF_FORMATVEC){#0,#4},%d0
subql #7,%d0 | bus error frame ?
jbne 1f
movel %sp,%sp@-
jbsr berr_040cleanup
addql #4,%sp
1:
#endif
jra .Lret_from_exception
ENTRY(system_call)
SAVE_ALL_SYS
GET_CURRENT(%d1)
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
| syscall trace?
tstb %curptr@(TASK_INFO+TINFO_FLAGS+2)
jmi do_trace_entry
cmpl #NR_syscalls,%d0
jcc badsys
syscall:
jbsr @(sys_call_table,%d0:l:4)@(0)
movel %d0,%sp@(PT_OFF_D0) | save the return value
ret_from_syscall:
|oriw #0x0700,%sr
movew %curptr@(TASK_INFO+TINFO_FLAGS+2),%d0
jne syscall_exit_work
1: RESTORE_ALL
syscall_exit_work:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1b | if so, skip resched, signals
lslw #1,%d0
jcs do_trace_exit
jmi do_delayed_trace
lslw #8,%d0
jmi do_signal_return
pea resume_userspace
jra schedule
ENTRY(ret_from_exception)
.Lret_from_exception:
btst #5,%sp@(PT_OFF_SR) | check if returning to kernel
bnes 1f | if so, skip resched, signals
| only allow interrupts when we are really the last one on the
| kernel stack, otherwise stack overflow can occur during
| heavy interrupt load
andw #ALLOWINT,%sr
resume_userspace:
moveb %curptr@(TASK_INFO+TINFO_FLAGS+3),%d0
jne exit_work
1: RESTORE_ALL
exit_work:
| save top of frame
movel %sp,%curptr@(TASK_THREAD+THREAD_ESP0)
lslb #1,%d0
jmi do_signal_return
pea resume_userspace
jra schedule
do_signal_return:
|andw #ALLOWINT,%sr
subql #4,%sp | dummy return address
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
bsrl do_signal
addql #4,%sp
RESTORE_SWITCH_STACK
addql #4,%sp
jbra resume_userspace
do_delayed_trace:
bclr #7,%sp@(PT_OFF_SR) | clear trace bit in SR
pea 1 | send SIGTRAP
movel %curptr,%sp@-
pea LSIGTRAP
jbsr send_sig
addql #8,%sp
addql #4,%sp
jbra resume_userspace
/* This is the main interrupt handler for autovector interrupts */
ENTRY(auto_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
subw #VEC_SPUR,%d0
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
auto_irqhandler_fixup = . + 2
jsr __m68k_handle_int | process the IRQ
addql #8,%sp | pop parameters off stack
ret_from_interrupt:
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
2: RESTORE_ALL
ALIGN
ret_from_last_interrupt:
moveq #(~ALLOWINT>>8)&0xff,%d0
andb %sp@(PT_OFF_SR),%d0
jne 2b
/* check if we need to do software interrupts */
tstl irq_stat+CPUSTAT_SOFTIRQ_PENDING
jeq .Lret_from_exception
pea ret_from_exception
jra do_softirq
/* Handler for user defined interrupt vectors */
ENTRY(user_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
| put exception # in d0
bfextu %sp@(PT_OFF_FORMATVEC){#4,#10},%d0
user_irqvec_fixup = . + 2
subw #VEC_USER,%d0
movel %sp,%sp@-
movel %d0,%sp@- | put vector # on stack
user_irqhandler_fixup = . + 2
jsr __m68k_handle_int | process the IRQ
addql #8,%sp | pop parameters off stack
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
RESTORE_ALL
/* Handler for uninitialized and spurious interrupts */
ENTRY(bad_inthandler)
SAVE_ALL_INT
GET_CURRENT(%d0)
addqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
movel %sp,%sp@-
jsr handle_badint
addql #4,%sp
subqb #1,%curptr@(TASK_INFO+TINFO_PREEMPT+1)
jeq ret_from_last_interrupt
RESTORE_ALL
ENTRY(sys_fork)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_fork
addql #4,%sp
RESTORE_SWITCH_STACK
rts
ENTRY(sys_clone)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_clone
addql #4,%sp
RESTORE_SWITCH_STACK
rts
ENTRY(sys_vfork)
SAVE_SWITCH_STACK
pea %sp@(SWITCH_STACK_SIZE)
jbsr m68k_vfork
addql #4,%sp
RESTORE_SWITCH_STACK
rts
ENTRY(sys_sigreturn)
SAVE_SWITCH_STACK
jbsr do_sigreturn
RESTORE_SWITCH_STACK
rts
ENTRY(sys_rt_sigreturn)
SAVE_SWITCH_STACK
jbsr do_rt_sigreturn
RESTORE_SWITCH_STACK
rts
resume:
/*
* Beware - when entering resume, prev (the current task) is
 * in a0, next (the new task) is in a1, so don't change these
* registers until their contents are no longer needed.
*/
/* save sr */
movew %sr,%a0@(TASK_THREAD+THREAD_SR)
/* save fs (sfc,%dfc) (may be pointing to kernel memory) */
movec %sfc,%d0
movew %d0,%a0@(TASK_THREAD+THREAD_FS)
/* save usp */
/* it is better to use a movel here instead of a movew 8*) */
movec %usp,%d0
movel %d0,%a0@(TASK_THREAD+THREAD_USP)
/* save non-scratch registers on stack */
SAVE_SWITCH_STACK
/* save current kernel stack pointer */
movel %sp,%a0@(TASK_THREAD+THREAD_KSP)
/* save floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
tstl m68k_fputype
jeq 3f
#endif
fsave %a0@(TASK_THREAD+THREAD_FPSTATE)
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
btst #3,m68k_cputype+3
beqs 1f
#endif
/* The 060 FPU keeps status in bits 15-8 of the first longword */
tstb %a0@(TASK_THREAD+THREAD_FPSTATE+2)
jeq 3f
#if !defined(CPU_M68060_ONLY)
jra 2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1: tstb %a0@(TASK_THREAD+THREAD_FPSTATE)
jeq 3f
#endif
2: fmovemx %fp0-%fp7,%a0@(TASK_THREAD+THREAD_FPREG)
fmoveml %fpcr/%fpsr/%fpiar,%a0@(TASK_THREAD+THREAD_FPCNTL)
3:
#endif /* CONFIG_M68KFPU_EMU_ONLY */
/* Return previous task in %d1 */
movel %curptr,%d1
/* switch to new task (a1 contains new task) */
movel %a1,%curptr
/* restore floating point context */
#ifndef CONFIG_M68KFPU_EMU_ONLY
#ifdef CONFIG_M68KFPU_EMU
tstl m68k_fputype
jeq 4f
#endif
#if defined(CONFIG_M68060)
#if !defined(CPU_M68060_ONLY)
btst #3,m68k_cputype+3
beqs 1f
#endif
/* The 060 FPU keeps status in bits 15-8 of the first longword */
tstb %a1@(TASK_THREAD+THREAD_FPSTATE+2)
jeq 3f
#if !defined(CPU_M68060_ONLY)
jra 2f
#endif
#endif /* CONFIG_M68060 */
#if !defined(CPU_M68060_ONLY)
1: tstb %a1@(TASK_THREAD+THREAD_FPSTATE)
jeq 3f
#endif
2: fmovemx %a1@(TASK_THREAD+THREAD_FPREG),%fp0-%fp7
fmoveml %a1@(TASK_THREAD+THREAD_FPCNTL),%fpcr/%fpsr/%fpiar
3: frestore %a1@(TASK_THREAD+THREAD_FPSTATE)
4:
#endif /* CONFIG_M68KFPU_EMU_ONLY */
/* restore the kernel stack pointer */
movel %a1@(TASK_THREAD+THREAD_KSP),%sp
/* restore non-scratch registers */
RESTORE_SWITCH_STACK
/* restore user stack pointer */
movel %a1@(TASK_THREAD+THREAD_USP),%a0
movel %a0,%usp
/* restore fs (sfc,%dfc) */
movew %a1@(TASK_THREAD+THREAD_FS),%a0
movec %a0,%sfc
movec %a0,%dfc
/* restore status register */
movew %a1@(TASK_THREAD+THREAD_SR),%sr
rts
.data
ALIGN
sys_call_table:
.long sys_restart_syscall /* 0 - old "setup()" system call, used for restarting */
.long sys_exit
.long sys_fork
.long sys_read
.long sys_write
.long sys_open /* 5 */
.long sys_close
.long sys_waitpid
.long sys_creat
.long sys_link
.long sys_unlink /* 10 */
.long sys_execve
.long sys_chdir
.long sys_time
.long sys_mknod
.long sys_chmod /* 15 */
.long sys_chown16
.long sys_ni_syscall /* old break syscall holder */
.long sys_stat
.long sys_lseek
.long sys_getpid /* 20 */
.long sys_mount
.long sys_oldumount
.long sys_setuid16
.long sys_getuid16
.long sys_stime /* 25 */
.long sys_ptrace
.long sys_alarm
.long sys_fstat
.long sys_pause
.long sys_utime /* 30 */
.long sys_ni_syscall /* old stty syscall holder */
.long sys_ni_syscall /* old gtty syscall holder */
.long sys_access
.long sys_nice
.long sys_ni_syscall /* 35 */ /* old ftime syscall holder */
.long sys_sync
.long sys_kill
.long sys_rename
.long sys_mkdir
.long sys_rmdir /* 40 */
.long sys_dup
.long sys_pipe
.long sys_times
.long sys_ni_syscall /* old prof syscall holder */
.long sys_brk /* 45 */
.long sys_setgid16
.long sys_getgid16
.long sys_signal
.long sys_geteuid16
.long sys_getegid16 /* 50 */
.long sys_acct
.long sys_umount /* recycled never used phys() */
.long sys_ni_syscall /* old lock syscall holder */
.long sys_ioctl
.long sys_fcntl /* 55 */
.long sys_ni_syscall /* old mpx syscall holder */
.long sys_setpgid
.long sys_ni_syscall /* old ulimit syscall holder */
.long sys_ni_syscall
.long sys_umask /* 60 */
.long sys_chroot
.long sys_ustat
.long sys_dup2
.long sys_getppid
.long sys_getpgrp /* 65 */
.long sys_setsid
.long sys_sigaction
.long sys_sgetmask
.long sys_ssetmask
.long sys_setreuid16 /* 70 */
.long sys_setregid16
.long sys_sigsuspend
.long sys_sigpending
.long sys_sethostname
.long sys_setrlimit /* 75 */
.long sys_old_getrlimit
.long sys_getrusage
.long sys_gettimeofday
.long sys_settimeofday
.long sys_getgroups16 /* 80 */
.long sys_setgroups16
.long sys_old_select
.long sys_symlink
.long sys_lstat
.long sys_readlink /* 85 */
.long sys_uselib
.long sys_swapon
.long sys_reboot
.long sys_old_readdir
.long sys_old_mmap /* 90 */
.long sys_munmap
.long sys_truncate
.long sys_ftruncate
.long sys_fchmod
.long sys_fchown16 /* 95 */
.long sys_getpriority
.long sys_setpriority
.long sys_ni_syscall /* old profil syscall holder */
.long sys_statfs
.long sys_fstatfs /* 100 */
.long sys_ni_syscall /* ioperm for i386 */
.long sys_socketcall
.long sys_syslog
.long sys_setitimer
.long sys_getitimer /* 105 */
.long sys_newstat
.long sys_newlstat
.long sys_newfstat
.long sys_ni_syscall
.long sys_ni_syscall /* 110 */ /* iopl for i386 */
.long sys_vhangup
.long sys_ni_syscall /* obsolete idle() syscall */
.long sys_ni_syscall /* vm86old for i386 */
.long sys_wait4
.long sys_swapoff /* 115 */
.long sys_sysinfo
.long sys_ipc
.long sys_fsync
.long sys_sigreturn
.long sys_clone /* 120 */
.long sys_setdomainname
.long sys_newuname
.long sys_cacheflush /* modify_ldt for i386 */
.long sys_adjtimex
.long sys_mprotect /* 125 */
.long sys_sigprocmask
.long sys_ni_syscall /* old "create_module" */
.long sys_init_module
.long sys_delete_module
.long sys_ni_syscall /* 130 - old "get_kernel_syms" */
.long sys_quotactl
.long sys_getpgid
.long sys_fchdir
.long sys_bdflush
.long sys_sysfs /* 135 */
.long sys_personality
.long sys_ni_syscall /* for afs_syscall */
.long sys_setfsuid16
.long sys_setfsgid16
.long sys_llseek /* 140 */
.long sys_getdents
.long sys_select
.long sys_flock
.long sys_msync
.long sys_readv /* 145 */
.long sys_writev
.long sys_getsid
.long sys_fdatasync
.long sys_sysctl
.long sys_mlock /* 150 */
.long sys_munlock
.long sys_mlockall
.long sys_munlockall
.long sys_sched_setparam
.long sys_sched_getparam /* 155 */
.long sys_sched_setscheduler
.long sys_sched_getscheduler
.long sys_sched_yield
.long sys_sched_get_priority_max
.long sys_sched_get_priority_min /* 160 */
.long sys_sched_rr_get_interval
.long sys_nanosleep
.long sys_mremap
.long sys_setresuid16
.long sys_getresuid16 /* 165 */
.long sys_getpagesize
.long sys_ni_syscall /* old sys_query_module */
.long sys_poll
.long sys_nfsservctl
.long sys_setresgid16 /* 170 */
.long sys_getresgid16
.long sys_prctl
.long sys_rt_sigreturn
.long sys_rt_sigaction
.long sys_rt_sigprocmask /* 175 */
.long sys_rt_sigpending
.long sys_rt_sigtimedwait
.long sys_rt_sigqueueinfo
.long sys_rt_sigsuspend
.long sys_pread64 /* 180 */
.long sys_pwrite64
	.long sys_lchown16
.long sys_getcwd
.long sys_capget
.long sys_capset /* 185 */
.long sys_sigaltstack
.long sys_sendfile
.long sys_ni_syscall /* streams1 */
.long sys_ni_syscall /* streams2 */
.long sys_vfork /* 190 */
.long sys_getrlimit
.long sys_mmap2
.long sys_truncate64
.long sys_ftruncate64
.long sys_stat64 /* 195 */
.long sys_lstat64
.long sys_fstat64
.long sys_chown
.long sys_getuid
.long sys_getgid /* 200 */
.long sys_geteuid
.long sys_getegid
.long sys_setreuid
.long sys_setregid
.long sys_getgroups /* 205 */
.long sys_setgroups
.long sys_fchown
.long sys_setresuid
.long sys_getresuid
.long sys_setresgid /* 210 */
.long sys_getresgid
.long sys_lchown
.long sys_setuid
.long sys_setgid
.long sys_setfsuid /* 215 */
.long sys_setfsgid
.long sys_pivot_root
.long sys_ni_syscall
.long sys_ni_syscall
.long sys_getdents64 /* 220 */
.long sys_gettid
.long sys_tkill
.long sys_setxattr
.long sys_lsetxattr
.long sys_fsetxattr /* 225 */
.long sys_getxattr
.long sys_lgetxattr
.long sys_fgetxattr
.long sys_listxattr
.long sys_llistxattr /* 230 */
.long sys_flistxattr
.long sys_removexattr
.long sys_lremovexattr
.long sys_fremovexattr
.long sys_futex /* 235 */
.long sys_sendfile64
.long sys_mincore
.long sys_madvise
.long sys_fcntl64
.long sys_readahead /* 240 */
.long sys_io_setup
.long sys_io_destroy
.long sys_io_getevents
.long sys_io_submit
.long sys_io_cancel /* 245 */
.long sys_fadvise64
.long sys_exit_group
.long sys_lookup_dcookie
.long sys_epoll_create
.long sys_epoll_ctl /* 250 */
.long sys_epoll_wait
.long sys_remap_file_pages
.long sys_set_tid_address
.long sys_timer_create
.long sys_timer_settime /* 255 */
.long sys_timer_gettime
.long sys_timer_getoverrun
.long sys_timer_delete
.long sys_clock_settime
.long sys_clock_gettime /* 260 */
.long sys_clock_getres
.long sys_clock_nanosleep
.long sys_statfs64
.long sys_fstatfs64
.long sys_tgkill /* 265 */
.long sys_utimes
.long sys_fadvise64_64
.long sys_mbind
.long sys_get_mempolicy
.long sys_set_mempolicy /* 270 */
.long sys_mq_open
.long sys_mq_unlink
.long sys_mq_timedsend
.long sys_mq_timedreceive
.long sys_mq_notify /* 275 */
.long sys_mq_getsetattr
.long sys_waitid
.long sys_ni_syscall /* for sys_vserver */
.long sys_add_key
.long sys_request_key /* 280 */
.long sys_keyctl
.long sys_ioprio_set
.long sys_ioprio_get
.long sys_inotify_init
.long sys_inotify_add_watch /* 285 */
.long sys_inotify_rm_watch
.long sys_migrate_pages
.long sys_openat
.long sys_mkdirat
.long sys_mknodat /* 290 */
.long sys_fchownat
.long sys_futimesat
.long sys_fstatat64
.long sys_unlinkat
.long sys_renameat /* 295 */
.long sys_linkat
.long sys_symlinkat
.long sys_readlinkat
.long sys_fchmodat
.long sys_faccessat /* 300 */
.long sys_ni_syscall /* Reserved for pselect6 */
.long sys_ni_syscall /* Reserved for ppoll */
.long sys_unshare
.long sys_set_robust_list
.long sys_get_robust_list /* 305 */
.long sys_splice
.long sys_sync_file_range
.long sys_tee
.long sys_vmsplice
.long sys_move_pages /* 310 */
.long sys_sched_setaffinity
.long sys_sched_getaffinity
.long sys_kexec_load
.long sys_getcpu
.long sys_epoll_pwait /* 315 */
.long sys_utimensat
.long sys_signalfd
.long sys_timerfd_create
.long sys_eventfd
.long sys_fallocate /* 320 */
.long sys_timerfd_settime
.long sys_timerfd_gettime
.long sys_signalfd4
.long sys_eventfd2
.long sys_epoll_create1 /* 325 */
.long sys_dup3
.long sys_pipe2
.long sys_inotify_init1
.long sys_preadv
.long sys_pwritev /* 330 */
.long sys_rt_tgsigqueueinfo
.long sys_perf_event_open
.long sys_get_thread_area
.long sys_set_thread_area
.long sys_atomic_cmpxchg_32 /* 335 */
.long sys_atomic_barrier
.long sys_fanotify_init
.long sys_fanotify_mark
.long sys_prlimit64
#ifdef CONFIG_MMU
#include "m68k_ksyms_mm.c"
#else
#include "m68k_ksyms_no.c"
#endif
#include <linux/module.h>
asmlinkage long long __ashldi3 (long long, int);
asmlinkage long long __ashrdi3 (long long, int);
asmlinkage long long __lshrdi3 (long long, int);
asmlinkage long long __muldi3 (long long, long long);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
#include <linux/module.h>
asmlinkage long long __ashldi3 (long long, int);
asmlinkage long long __ashrdi3 (long long, int);
asmlinkage long long __lshrdi3 (long long, int);
asmlinkage long long __muldi3 (long long, long long);
/* The following are special because they're not called
explicitly (the C compiler generates them). Fortunately,
their interface isn't gonna change any time soon now, so
it's OK to leave it out of version control. */
EXPORT_SYMBOL(__ashldi3);
EXPORT_SYMBOL(__ashrdi3);
EXPORT_SYMBOL(__lshrdi3);
EXPORT_SYMBOL(__muldi3);
#ifdef CONFIG_MMU
#include "module_mm.c"
#else
#include "module_no.c"
#endif
/*
 * This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
#ifdef CONFIG_MODULES
void *module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
return vmalloc(size);
}
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
}
/* We don't need anything special. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
return 0;
}
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location += sym->st_value;
break;
case R_68K_PC32:
			/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
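/*
 * Same as apply_relocate(), but for RELA sections: the addend comes from
 * the relocation record instead of the location being patched.
 */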
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate_add section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location = rel[i].r_addend + sym->st_value;
break;
case R_68K_PC32:
			/* Add the value, subtract its position */
*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod)
{
module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
}
#endif /* CONFIG_MODULES */
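/*
 * Apply the m68k-specific fixups recorded by a module: patch in the
 * runtime m68k_memoffset or virtual-to-node shift at each location.
 */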
void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end)
{
struct m68k_fixup_info *fixup;
for (fixup = start; fixup < end; fixup++) {
switch (fixup->type) {
case m68k_fixup_memoffset:
*(u32 *)fixup->addr = m68k_memoffset;
break;
case m68k_fixup_vnode_shift:
*(u16 *)fixup->addr += m68k_virt_to_node_shift;
break;
}
}
}
/*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
#include <linux/moduleloader.h>
#include <linux/elf.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/string.h>
#include <linux/kernel.h>
#if 0
#define DEBUGP printk
#else
#define DEBUGP(fmt...)
#endif
#ifdef CONFIG_MODULES
void *module_alloc(unsigned long size)
{
if (size == 0)
return NULL;
return vmalloc(size);
}
/* Free memory returned from module_alloc */
void module_free(struct module *mod, void *module_region)
{
vfree(module_region);
}
/* We don't need anything special. */
int module_frob_arch_sections(Elf_Ehdr *hdr,
Elf_Shdr *sechdrs,
char *secstrings,
struct module *mod)
{
return 0;
}
int apply_relocate(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rel *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location += sym->st_value;
break;
case R_68K_PC32:
			/* Add the value, subtract its position */
*location += sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int apply_relocate_add(Elf32_Shdr *sechdrs,
const char *strtab,
unsigned int symindex,
unsigned int relsec,
struct module *me)
{
unsigned int i;
Elf32_Rela *rel = (void *)sechdrs[relsec].sh_addr;
Elf32_Sym *sym;
uint32_t *location;
DEBUGP("Applying relocate_add section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
/* This is the symbol it is referring to. Note that all
undefined symbols have been resolved. */
sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ ELF32_R_SYM(rel[i].r_info);
switch (ELF32_R_TYPE(rel[i].r_info)) {
case R_68K_32:
/* We add the value into the location given */
*location = rel[i].r_addend + sym->st_value;
break;
case R_68K_PC32:
			/* Add the value, subtract its position */
*location = rel[i].r_addend + sym->st_value - (uint32_t)location;
break;
default:
printk(KERN_ERR "module %s: Unknown relocation: %u\n",
me->name, ELF32_R_TYPE(rel[i].r_info));
return -ENOEXEC;
}
}
return 0;
}
int module_finalize(const Elf_Ehdr *hdr,
const Elf_Shdr *sechdrs,
struct module *mod)
{
module_fixup(mod, mod->arch.fixup_start, mod->arch.fixup_end);
return 0;
}
void module_arch_cleanup(struct module *mod)
{
}
#endif /* CONFIG_MODULES */
void module_fixup(struct module *mod, struct m68k_fixup_info *start,
struct m68k_fixup_info *end)
{
struct m68k_fixup_info *fixup;
for (fixup = start; fixup < end; fixup++) {
switch (fixup->type) {
case m68k_fixup_memoffset:
*(u32 *)fixup->addr = m68k_memoffset;
break;
case m68k_fixup_vnode_shift:
*(u16 *)fixup->addr += m68k_virt_to_node_shift;
break;
}
}
}
#ifdef CONFIG_MMU
#include "process_mm.c"
#else
#include "process_no.c"
#endif
/*
 * linux/arch/m68k/kernel/process.c
*
* Copyright (C) 1995 Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*/
/*
* This file handles the architecture-dependent parts of process handling..
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
/*
* Initial task/thread structure. Make this a per-architecture thing,
* because different architectures tend to have different
* alignment requirements and potentially different initial
* setup.
*/
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
union thread_union init_thread_union __init_task_data
__attribute__((aligned(THREAD_SIZE))) =
{ INIT_THREAD_INFO(init_task) };
/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
asmlinkage void ret_from_fork(void);
/*
* Return saved PC from a blocked thread
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
/* Check whether the thread is blocked in resume() */
if (in_sched_functions(sw->retpc))
return ((unsigned long *)sw->a6)[1];
else
return sw->retpc;
}
/*
* The idle loop on an m68k..
*/
static void default_idle(void)
{
if (!need_resched())
#if defined(MACH_ATARI_ONLY)
/* block out HSYNC on the atari (falcon) */
__asm__("stop #0x2200" : : : "cc");
#else
		__asm__("stop #0x2000" : : : "cc");
#endif
}
void (*idle)(void) = default_idle;
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched())
idle();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}
void machine_restart(char * __unused)
{
if (mach_reset)
mach_reset();
for (;;);
}
void machine_halt(void)
{
if (mach_halt)
mach_halt();
for (;;);
}
void machine_power_off(void)
{
if (mach_power_off)
mach_power_off();
for (;;);
}
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
void show_regs(struct pt_regs * regs)
{
printk("\n");
printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
regs->orig_d0, regs->d0, regs->a2, regs->a1);
printk("A0: %08lx D5: %08lx D4: %08lx\n",
regs->a0, regs->d5, regs->d4);
printk("D3: %08lx D2: %08lx D1: %08lx\n",
regs->d3, regs->d2, regs->d1);
if (!(regs->sr & PS_S))
printk("USP: %08lx\n", rdusp());
}
/*
* Create a kernel thread
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
int pid;
mm_segment_t fs;
fs = get_fs();
set_fs (KERNEL_DS);
{
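	/*
	 * Issue clone directly via trap #0; in the child, call fn(arg) and
	 * then exit via trap #0 with its return value.  The parent falls
	 * through and returns the child's pid.
	 */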
register long retval __asm__ ("d0");
register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;
retval = __NR_clone;
__asm__ __volatile__
("clrl %%d2\n\t"
"trap #0\n\t" /* Linux/m68k system call */
"tstl %0\n\t" /* child or parent */
"jne 1f\n\t" /* parent - jump */
"lea %%sp@(%c7),%6\n\t" /* reload current */
"movel %6@,%6\n\t"
"movel %3,%%sp@-\n\t" /* push argument */
"jsr %4@\n\t" /* call fn */
"movel %0,%%d1\n\t" /* pass exit value */
"movel %2,%%d0\n\t" /* exit */
"trap #0\n"
"1:"
: "+d" (retval)
: "i" (__NR_clone), "i" (__NR_exit),
"r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
"i" (-THREAD_SIZE)
: "d2");
pid = retval;
}
set_fs (fs);
return pid;
}
EXPORT_SYMBOL(kernel_thread);
void flush_thread(void)
{
unsigned long zero = 0;
set_fs(USER_DS);
current->thread.fs = __USER_DS;
if (!FPU_IS_EMU)
asm volatile (".chip 68k/68881\n\t"
"frestore %0@\n\t"
".chip 68k" : : "a" (&zero));
}
/*
* "m68k_fork()".. By the time we get here, the
* non-volatile registers have also been saved on the
* stack. We do some ugly pointer stuff here.. (see
* also copy_thread)
*/
asmlinkage int m68k_fork(struct pt_regs *regs)
{
return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
asmlinkage int m68k_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
NULL, NULL);
}
asmlinkage int m68k_clone(struct pt_regs *regs)
{
unsigned long clone_flags;
unsigned long newsp;
int __user *parent_tidptr, *child_tidptr;
/* syscall2 puts clone_flags in d1 and usp in d2 */
clone_flags = regs->d1;
newsp = regs->d2;
parent_tidptr = (int __user *)regs->d3;
child_tidptr = (int __user *)regs->d4;
if (!newsp)
newsp = rdusp();
return do_fork(clone_flags, newsp, regs, 0,
parent_tidptr, child_tidptr);
}
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
struct pt_regs * childregs;
struct switch_stack * childstack, *stack;
unsigned long *retp;
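	/*
	 * Lay out the child's kernel stack: a pt_regs frame at the top with
	 * a switch_stack below it, so resume() can switch to the child.
	 */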
childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
*childregs = *regs;
childregs->d0 = 0;
retp = ((unsigned long *) regs);
stack = ((struct switch_stack *) retp) - 1;
childstack = ((struct switch_stack *) childregs) - 1;
*childstack = *stack;
childstack->retpc = (unsigned long)ret_from_fork;
p->thread.usp = usp;
p->thread.ksp = (unsigned long)childstack;
if (clone_flags & CLONE_SETTLS)
task_thread_info(p)->tp_value = regs->d5;
/*
* Must save the current SFC/DFC value, NOT the value when
* the parent was last descheduled - RGH 10-08-96
*/
p->thread.fs = get_fs().seg;
if (!FPU_IS_EMU) {
/* Copy the current fpu state */
asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
"fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
: : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
: "memory");
/* Restore the state in case the fpu was busy */
asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
}
return 0;
}
/* Fill in the fpu structure for a core dump. */
int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
{
char fpustate[216];
if (FPU_IS_EMU) {
int i;
memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
memcpy(fpu->fpregs, current->thread.fp, 96);
/* Convert internal fpu reg representation
* into long double format
*/
for (i = 0; i < 24; i += 3)
fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
((fpu->fpregs[i] & 0x0000ffff) << 16);
return 1;
}
/* First dump the fpu context to avoid protocol violation. */
asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
return 0;
asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
:: "m" (fpu->fpcntl[0])
: "memory");
asm volatile ("fmovemx %/fp0-%/fp7,%0"
:: "m" (fpu->fpregs[0])
: "memory");
return 1;
}
EXPORT_SYMBOL(dump_fpu);
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char __user *name,
const char __user *const __user *argv,
const char __user *const __user *envp)
{
int error;
char * filename;
struct pt_regs *regs = (struct pt_regs *) &name;
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
error = do_execve(filename, argv, envp, regs);
putname(filename);
return error;
}
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)task_stack_page(p);
fp = ((struct switch_stack *)p->thread.ksp)->a6;
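	/* walk the saved frame pointers until we step out of the scheduler */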
do {
if (fp < stack_page+sizeof(struct thread_info) ||
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
if (!in_sched_functions(pc))
return pc;
fp = *(unsigned long *) fp;
} while (count++ < 16);
return 0;
}
/*
* linux/arch/m68k/kernel/process.c
*
* Copyright (C) 1995 Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*/
/*
* This file handles the architecture-dependent parts of process handling..
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/reboot.h>
#include <linux/init_task.h>
#include <linux/mqueue.h>
#include <asm/uaccess.h>
#include <asm/system.h>
#include <asm/traps.h>
#include <asm/machdep.h>
#include <asm/setup.h>
#include <asm/pgtable.h>
/*
* Initial task/thread structure. Make this a per-architecture thing,
* because different architectures tend to have different
* alignment requirements and potentially different initial
* setup.
*/
static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
union thread_union init_thread_union __init_task_data
__attribute__((aligned(THREAD_SIZE))) =
{ INIT_THREAD_INFO(init_task) };
/* initial task structure */
struct task_struct init_task = INIT_TASK(init_task);
EXPORT_SYMBOL(init_task);
asmlinkage void ret_from_fork(void);
/*
* Return saved PC from a blocked thread
*/
unsigned long thread_saved_pc(struct task_struct *tsk)
{
struct switch_stack *sw = (struct switch_stack *)tsk->thread.ksp;
/* Check whether the thread is blocked in resume() */
if (in_sched_functions(sw->retpc))
return ((unsigned long *)sw->a6)[1];
else
return sw->retpc;
}
/*
* The idle loop on an m68k..
*/
static void default_idle(void)
{
if (!need_resched())
#if defined(MACH_ATARI_ONLY)
/* block out HSYNC on the atari (falcon) */
__asm__("stop #0x2200" : : : "cc");
#else
__asm__("stop #0x2000" : : : "cc");
#endif
}
void (*idle)(void) = default_idle;
/*
* The idle thread. There's no useful work to be
* done, so just try to conserve power and have a
* low exit latency (ie sit in a loop waiting for
* somebody to say that they'd like to reschedule)
*/
void cpu_idle(void)
{
/* endless idle loop with no priority at all */
while (1) {
while (!need_resched())
idle();
preempt_enable_no_resched();
schedule();
preempt_disable();
}
}
void machine_restart(char * __unused)
{
if (mach_reset)
mach_reset();
for (;;);
}
void machine_halt(void)
{
if (mach_halt)
mach_halt();
for (;;);
}
void machine_power_off(void)
{
if (mach_power_off)
mach_power_off();
for (;;);
}
void (*pm_power_off)(void) = machine_power_off;
EXPORT_SYMBOL(pm_power_off);
void show_regs(struct pt_regs * regs)
{
printk("\n");
printk("Format %02x Vector: %04x PC: %08lx Status: %04x %s\n",
regs->format, regs->vector, regs->pc, regs->sr, print_tainted());
printk("ORIG_D0: %08lx D0: %08lx A2: %08lx A1: %08lx\n",
regs->orig_d0, regs->d0, regs->a2, regs->a1);
printk("A0: %08lx D5: %08lx D4: %08lx\n",
regs->a0, regs->d5, regs->d4);
printk("D3: %08lx D2: %08lx D1: %08lx\n",
regs->d3, regs->d2, regs->d1);
if (!(regs->sr & PS_S))
printk("USP: %08lx\n", rdusp());
}
/*
* Create a kernel thread
*/
int kernel_thread(int (*fn)(void *), void * arg, unsigned long flags)
{
int pid;
mm_segment_t fs;
fs = get_fs();
set_fs (KERNEL_DS);
{
register long retval __asm__ ("d0");
register long clone_arg __asm__ ("d1") = flags | CLONE_VM | CLONE_UNTRACED;
retval = __NR_clone;
__asm__ __volatile__
("clrl %%d2\n\t"
"trap #0\n\t" /* Linux/m68k system call */
"tstl %0\n\t" /* child or parent */
"jne 1f\n\t" /* parent - jump */
"lea %%sp@(%c7),%6\n\t" /* reload current */
"movel %6@,%6\n\t"
"movel %3,%%sp@-\n\t" /* push argument */
"jsr %4@\n\t" /* call fn */
"movel %0,%%d1\n\t" /* pass exit value */
"movel %2,%%d0\n\t" /* exit */
"trap #0\n"
"1:"
: "+d" (retval)
: "i" (__NR_clone), "i" (__NR_exit),
"r" (arg), "a" (fn), "d" (clone_arg), "r" (current),
"i" (-THREAD_SIZE)
: "d2");
pid = retval;
}
set_fs (fs);
return pid;
}
EXPORT_SYMBOL(kernel_thread);
void flush_thread(void)
{
unsigned long zero = 0;
set_fs(USER_DS);
current->thread.fs = __USER_DS;
if (!FPU_IS_EMU)
asm volatile (".chip 68k/68881\n\t"
"frestore %0@\n\t"
".chip 68k" : : "a" (&zero));
}
/*
* "m68k_fork()".. By the time we get here, the
* non-volatile registers have also been saved on the
* stack. We do some ugly pointer stuff here.. (see
* also copy_thread)
*/
asmlinkage int m68k_fork(struct pt_regs *regs)
{
return do_fork(SIGCHLD, rdusp(), regs, 0, NULL, NULL);
}
asmlinkage int m68k_vfork(struct pt_regs *regs)
{
return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(), regs, 0,
NULL, NULL);
}
asmlinkage int m68k_clone(struct pt_regs *regs)
{
unsigned long clone_flags;
unsigned long newsp;
int __user *parent_tidptr, *child_tidptr;
/* syscall2 puts clone_flags in d1 and usp in d2 */
clone_flags = regs->d1;
newsp = regs->d2;
parent_tidptr = (int __user *)regs->d3;
child_tidptr = (int __user *)regs->d4;
if (!newsp)
newsp = rdusp();
return do_fork(clone_flags, newsp, regs, 0,
parent_tidptr, child_tidptr);
}
int copy_thread(unsigned long clone_flags, unsigned long usp,
unsigned long unused,
struct task_struct * p, struct pt_regs * regs)
{
struct pt_regs * childregs;
struct switch_stack * childstack, *stack;
unsigned long *retp;
childregs = (struct pt_regs *) (task_stack_page(p) + THREAD_SIZE) - 1;
*childregs = *regs;
childregs->d0 = 0;
retp = ((unsigned long *) regs);
stack = ((struct switch_stack *) retp) - 1;
childstack = ((struct switch_stack *) childregs) - 1;
*childstack = *stack;
childstack->retpc = (unsigned long)ret_from_fork;
p->thread.usp = usp;
p->thread.ksp = (unsigned long)childstack;
if (clone_flags & CLONE_SETTLS)
task_thread_info(p)->tp_value = regs->d5;
/*
* Must save the current SFC/DFC value, NOT the value when
* the parent was last descheduled - RGH 10-08-96
*/
p->thread.fs = get_fs().seg;
if (!FPU_IS_EMU) {
/* Copy the current fpu state */
asm volatile ("fsave %0" : : "m" (p->thread.fpstate[0]) : "memory");
if (!CPU_IS_060 ? p->thread.fpstate[0] : p->thread.fpstate[2])
asm volatile ("fmovemx %/fp0-%/fp7,%0\n\t"
"fmoveml %/fpiar/%/fpcr/%/fpsr,%1"
: : "m" (p->thread.fp[0]), "m" (p->thread.fpcntl[0])
: "memory");
/* Restore the state in case the fpu was busy */
asm volatile ("frestore %0" : : "m" (p->thread.fpstate[0]));
}
return 0;
}
/* Fill in the fpu structure for a core dump. */
int dump_fpu (struct pt_regs *regs, struct user_m68kfp_struct *fpu)
{
char fpustate[216];
if (FPU_IS_EMU) {
int i;
memcpy(fpu->fpcntl, current->thread.fpcntl, 12);
memcpy(fpu->fpregs, current->thread.fp, 96);
/* Convert internal fpu reg representation
* into long double format
*/
for (i = 0; i < 24; i += 3)
fpu->fpregs[i] = ((fpu->fpregs[i] & 0xffff0000) << 15) |
((fpu->fpregs[i] & 0x0000ffff) << 16);
return 1;
}
/* First dump the fpu context to avoid protocol violation. */
asm volatile ("fsave %0" :: "m" (fpustate[0]) : "memory");
if (!CPU_IS_060 ? !fpustate[0] : !fpustate[2])
return 0;
asm volatile ("fmovem %/fpiar/%/fpcr/%/fpsr,%0"
:: "m" (fpu->fpcntl[0])
: "memory");
asm volatile ("fmovemx %/fp0-%/fp7,%0"
:: "m" (fpu->fpregs[0])
: "memory");
return 1;
}
EXPORT_SYMBOL(dump_fpu);
/*
* sys_execve() executes a new program.
*/
asmlinkage int sys_execve(const char __user *name,
const char __user *const __user *argv,
const char __user *const __user *envp)
{
int error;
char * filename;
struct pt_regs *regs = (struct pt_regs *) &name;
filename = getname(name);
error = PTR_ERR(filename);
if (IS_ERR(filename))
return error;
error = do_execve(filename, argv, envp, regs);
putname(filename);
return error;
}
unsigned long get_wchan(struct task_struct *p)
{
unsigned long fp, pc;
unsigned long stack_page;
int count = 0;
if (!p || p == current || p->state == TASK_RUNNING)
return 0;
stack_page = (unsigned long)task_stack_page(p);
fp = ((struct switch_stack *)p->thread.ksp)->a6;
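/* walk the saved frame pointers, staying inside the kernel stack;
 * 8184 presumably keeps the two longs read at fp[0]/fp[1] below within
 * an 8 KB kernel stack */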
do {
if (fp < stack_page+sizeof(struct thread_info) ||
fp >= 8184+stack_page)
return 0;
pc = ((unsigned long *)fp)[1];
if (!in_sched_functions(pc))
return pc;
fp = *(unsigned long *) fp;
} while (count++ < 16);
return 0;
}
#ifdef CONFIG_MMU
#include "ptrace_mm.c"
#else
#include "ptrace_no.c"
#endif
/*
* linux/arch/m68k/kernel/ptrace.c
*
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
/* determines which bits in the SR the user has access to. */
/* 1 = access 0 = no access */
#define SR_MASK 0x001f
/* sets the trace bits. */
#define TRACE_BITS 0xC000
#define T1_BIT 0x8000
#define T0_BIT 0x4000
/* Find the stack offset for a register, relative to thread.esp0. */
#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
#define SW_REG(reg) ((long)&((struct switch_stack *)0)->reg \
- sizeof(struct switch_stack))
/* Mapping from PT_xxx to the stack offset at which the register is
saved. Notice that usp has no stack-slot and needs to be treated
specially (see get_reg/put_reg below). */
static const int regoff[] = {
[0] = PT_REG(d1),
[1] = PT_REG(d2),
[2] = PT_REG(d3),
[3] = PT_REG(d4),
[4] = PT_REG(d5),
[5] = SW_REG(d6),
[6] = SW_REG(d7),
[7] = PT_REG(a0),
[8] = PT_REG(a1),
[9] = PT_REG(a2),
[10] = SW_REG(a3),
[11] = SW_REG(a4),
[12] = SW_REG(a5),
[13] = SW_REG(a6),
[14] = PT_REG(d0),
[15] = -1,
[16] = PT_REG(orig_d0),
[17] = PT_REG(sr),
[18] = PT_REG(pc),
};
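/*
 * PT_REG() and SW_REG() above are hand-rolled offset calculations: PT_REG()
 * yields the byte offset of a member inside struct pt_regs, and SW_REG()
 * the (negative) offset of a switch_stack member relative to the pt_regs
 * base that thread.esp0 points at.  A sketch of the same idea written with
 * offsetof() (the *_OFF names are made up for illustration):
 */
#include <linux/stddef.h>

#define PT_REG_OFF(reg)	((long)offsetof(struct pt_regs, reg))
#define SW_REG_OFF(reg)	((long)offsetof(struct switch_stack, reg) - \
			 (long)sizeof(struct switch_stack))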
/*
* Get contents of register REGNO in task TASK.
*/
static inline long get_reg(struct task_struct *task, int regno)
{
unsigned long *addr;
if (regno == PT_USP)
addr = &task->thread.usp;
else if (regno < ARRAY_SIZE(regoff))
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
else
return 0;
/* Need to take stkadj into account. */
if (regno == PT_SR || regno == PT_PC) {
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
addr = (unsigned long *) ((unsigned long)addr + stkadj);
/* The sr is actually a 16 bit register. */
if (regno == PT_SR)
return *(unsigned short *)addr;
}
return *addr;
}
/*
* Write contents of register REGNO in task TASK.
*/
static inline int put_reg(struct task_struct *task, int regno,
unsigned long data)
{
unsigned long *addr;
if (regno == PT_USP)
addr = &task->thread.usp;
else if (regno < ARRAY_SIZE(regoff))
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
else
return -1;
/* Need to take stkadj into account. */
if (regno == PT_SR || regno == PT_PC) {
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
addr = (unsigned long *) ((unsigned long)addr + stkadj);
/* The sr is actually a 16 bit register. */
if (regno == PT_SR) {
*(unsigned short *)addr = data;
return 0;
}
}
*addr = data;
return 0;
}
/*
* Make sure the single step bit is not set.
*/
static inline void singlestep_disable(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp);
clear_tsk_thread_flag(child, TIF_DELAYED_TRACE);
}
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
singlestep_disable(child);
}
void user_enable_single_step(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp | T1_BIT);
set_tsk_thread_flag(child, TIF_DELAYED_TRACE);
}
void user_enable_block_step(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp | T0_BIT);
}
void user_disable_single_step(struct task_struct *child)
{
singlestep_disable(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long tmp;
int i, ret = 0;
int regno = addr >> 2; /* temporary hack. */
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
if (addr & 3)
goto out_eio;
if (regno >= 0 && regno < 19) {
tmp = get_reg(child, regno);
} else if (regno >= 21 && regno < 49) {
tmp = child->thread.fp[regno - 21];
/* Convert internal fpu reg representation
* into long double format
*/
if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
tmp = ((tmp & 0xffff0000) << 15) |
((tmp & 0x0000ffff) << 16);
} else
goto out_eio;
ret = put_user(tmp, datap);
break;
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
if (addr & 3)
goto out_eio;
if (regno == PT_SR) {
data &= SR_MASK;
data |= get_reg(child, PT_SR) & ~SR_MASK;
}
if (regno >= 0 && regno < 19) {
if (put_reg(child, regno, data))
goto out_eio;
} else if (regno >= 21 && regno < 48) {
/* Convert long double format
* into internal fpu reg representation
*/
if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
data <<= 15;
data = (data & 0xffff0000) |
((data & 0x0000ffff) >> 1);
}
child->thread.fp[regno - 21] = data;
} else
goto out_eio;
break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */
for (i = 0; i < 19; i++) {
tmp = get_reg(child, i);
ret = put_user(tmp, datap);
if (ret)
break;
datap++;
}
break;
case PTRACE_SETREGS: /* Set all gp regs in the child. */
for (i = 0; i < 19; i++) {
ret = get_user(tmp, datap);
if (ret)
break;
if (i == PT_SR) {
tmp &= SR_MASK;
tmp |= get_reg(child, PT_SR) & ~SR_MASK;
}
put_reg(child, i, tmp);
datap++;
}
break;
case PTRACE_GETFPREGS: /* Get the child FPU state. */
if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_SETFPREGS: /* Set the child FPU state. */
if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
out_eio:
return -EIO;
}
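/*
 * The PTRACE_PEEKUSR path above expects addr to be the register index
 * scaled by 4 (regno = addr >> 2).  A minimal tracer sketch from userspace
 * (a separate program, not part of this file; assumes an m68k host whose
 * <asm/ptrace.h> provides the PT_PC index used here):
 */
#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <asm/ptrace.h>

int main(int argc, char *argv[])
{
	pid_t pid;
	long pc;

	if (argc != 2)
		return 1;
	pid = atoi(argv[1]);
	if (ptrace(PTRACE_ATTACH, pid, NULL, NULL) < 0)
		return 1;
	waitpid(pid, NULL, 0);		/* wait for the attach stop */
	errno = 0;
	pc = ptrace(PTRACE_PEEKUSER, pid, (void *)(long)(PT_PC * 4), NULL);
	if (errno == 0)
		printf("pc = %#lx\n", pc);
	ptrace(PTRACE_DETACH, pid, NULL, NULL);
	return 0;
}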
asmlinkage void syscall_trace(void)
{
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
/*
* linux/arch/m68k/kernel/ptrace.c
*
* Copyright (C) 1994 by Hamish Macdonald
* Taken from linux/kernel/ptrace.c and modified for M680x0.
* linux/kernel/ptrace.c is by Ross Biro 1/23/92, edited by Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General
* Public License. See the file COPYING in the main directory of
* this archive for more details.
*/
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/user.h>
#include <linux/signal.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/system.h>
#include <asm/processor.h>
/*
* does not yet catch signals sent when the child dies.
* in exit.c or in signal.c.
*/
/* determines which bits in the SR the user has access to. */
/* 1 = access 0 = no access */
#define SR_MASK 0x001f
/* sets the trace bits. */
#define TRACE_BITS 0xC000
#define T1_BIT 0x8000
#define T0_BIT 0x4000
/* Find the stack offset for a register, relative to thread.esp0. */
#define PT_REG(reg) ((long)&((struct pt_regs *)0)->reg)
#define SW_REG(reg) ((long)&((struct switch_stack *)0)->reg \
- sizeof(struct switch_stack))
/* Mapping from PT_xxx to the stack offset at which the register is
saved. Notice that usp has no stack-slot and needs to be treated
specially (see get_reg/put_reg below). */
static const int regoff[] = {
[0] = PT_REG(d1),
[1] = PT_REG(d2),
[2] = PT_REG(d3),
[3] = PT_REG(d4),
[4] = PT_REG(d5),
[5] = SW_REG(d6),
[6] = SW_REG(d7),
[7] = PT_REG(a0),
[8] = PT_REG(a1),
[9] = PT_REG(a2),
[10] = SW_REG(a3),
[11] = SW_REG(a4),
[12] = SW_REG(a5),
[13] = SW_REG(a6),
[14] = PT_REG(d0),
[15] = -1,
[16] = PT_REG(orig_d0),
[17] = PT_REG(sr),
[18] = PT_REG(pc),
};
/*
* Get contents of register REGNO in task TASK.
*/
static inline long get_reg(struct task_struct *task, int regno)
{
unsigned long *addr;
if (regno == PT_USP)
addr = &task->thread.usp;
else if (regno < ARRAY_SIZE(regoff))
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
else
return 0;
/* Need to take stkadj into account. */
if (regno == PT_SR || regno == PT_PC) {
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
addr = (unsigned long *) ((unsigned long)addr + stkadj);
/* The sr is actually a 16 bit register. */
if (regno == PT_SR)
return *(unsigned short *)addr;
}
return *addr;
}
/*
* Write contents of register REGNO in task TASK.
*/
static inline int put_reg(struct task_struct *task, int regno,
unsigned long data)
{
unsigned long *addr;
if (regno == PT_USP)
addr = &task->thread.usp;
else if (regno < ARRAY_SIZE(regoff))
addr = (unsigned long *)(task->thread.esp0 + regoff[regno]);
else
return -1;
/* Need to take stkadj into account. */
if (regno == PT_SR || regno == PT_PC) {
long stkadj = *(long *)(task->thread.esp0 + PT_REG(stkadj));
addr = (unsigned long *) ((unsigned long)addr + stkadj);
/* The sr is actually a 16 bit register. */
if (regno == PT_SR) {
*(unsigned short *)addr = data;
return 0;
}
}
*addr = data;
return 0;
}
/*
* Make sure the single step bit is not set.
*/
static inline void singlestep_disable(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp);
clear_tsk_thread_flag(child, TIF_DELAYED_TRACE);
}
/*
* Called by kernel/ptrace.c when detaching..
*/
void ptrace_disable(struct task_struct *child)
{
singlestep_disable(child);
}
void user_enable_single_step(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp | T1_BIT);
set_tsk_thread_flag(child, TIF_DELAYED_TRACE);
}
void user_enable_block_step(struct task_struct *child)
{
unsigned long tmp = get_reg(child, PT_SR) & ~TRACE_BITS;
put_reg(child, PT_SR, tmp | T0_BIT);
}
void user_disable_single_step(struct task_struct *child)
{
singlestep_disable(child);
}
long arch_ptrace(struct task_struct *child, long request,
unsigned long addr, unsigned long data)
{
unsigned long tmp;
int i, ret = 0;
int regno = addr >> 2; /* temporary hack. */
unsigned long __user *datap = (unsigned long __user *) data;
switch (request) {
/* read the word at location addr in the USER area. */
case PTRACE_PEEKUSR:
if (addr & 3)
goto out_eio;
if (regno >= 0 && regno < 19) {
tmp = get_reg(child, regno);
} else if (regno >= 21 && regno < 49) {
tmp = child->thread.fp[regno - 21];
/* Convert internal fpu reg representation
* into long double format
*/
if (FPU_IS_EMU && (regno < 45) && !(regno % 3))
tmp = ((tmp & 0xffff0000) << 15) |
((tmp & 0x0000ffff) << 16);
} else
goto out_eio;
ret = put_user(tmp, datap);
break;
case PTRACE_POKEUSR:
/* write the word at location addr in the USER area */
if (addr & 3)
goto out_eio;
if (regno == PT_SR) {
data &= SR_MASK;
data |= get_reg(child, PT_SR) & ~SR_MASK;
}
if (regno >= 0 && regno < 19) {
if (put_reg(child, regno, data))
goto out_eio;
} else if (regno >= 21 && regno < 48) {
/* Convert long double format
* into internal fpu reg representation
*/
if (FPU_IS_EMU && (regno < 45) && !(regno % 3)) {
data <<= 15;
data = (data & 0xffff0000) |
((data & 0x0000ffff) >> 1);
}
child->thread.fp[regno - 21] = data;
} else
goto out_eio;
break;
case PTRACE_GETREGS: /* Get all gp regs from the child. */
for (i = 0; i < 19; i++) {
tmp = get_reg(child, i);
ret = put_user(tmp, datap);
if (ret)
break;
datap++;
}
break;
case PTRACE_SETREGS: /* Set all gp regs in the child. */
for (i = 0; i < 19; i++) {
ret = get_user(tmp, datap);
if (ret)
break;
if (i == PT_SR) {
tmp &= SR_MASK;
tmp |= get_reg(child, PT_SR) & ~SR_MASK;
}
put_reg(child, i, tmp);
datap++;
}
break;
case PTRACE_GETFPREGS: /* Get the child FPU state. */
if (copy_to_user(datap, &child->thread.fp,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_SETFPREGS: /* Set the child FPU state. */
if (copy_from_user(&child->thread.fp, datap,
sizeof(struct user_m68kfp_struct)))
ret = -EFAULT;
break;
case PTRACE_GET_THREAD_AREA:
ret = put_user(task_thread_info(child)->tp_value, datap);
break;
default:
ret = ptrace_request(child, request, addr, data);
break;
}
return ret;
out_eio:
return -EIO;
}
asmlinkage void syscall_trace(void)
{
ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
? 0x80 : 0));
/*
* this isn't the same as continuing with a signal, but it will do
* for normal use. strace only continues with a signal if the
* stopping signal is not SIGTRAP. -brl
*/
if (current->exit_code) {
send_sig(current->exit_code, current, 1);
current->exit_code = 0;
}
}
#ifdef CONFIG_MMU
#include "setup_mm.c"
#else
#include "setup_no.c"
#endif
/*
* linux/arch/m68k/kernel/setup.c
*
* Copyright (C) 1995 Hamish Macdonald
*/
/*
* This file handles the architecture-dependent parts of system setup
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/console.h>
#include <linux/genhd.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/initrd.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/machdep.h>
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
#ifdef CONFIG_ATARI
#include <asm/atarihw.h>
#include <asm/atari_stram.h>
#endif
#ifdef CONFIG_SUN3X
#include <asm/dvma.h>
#endif
#include <asm/natfeat.h>
#if !FPSTATESIZE || !NR_IRQS
#warning No CPU/platform type selected, your kernel will not work!
#warning Are you building an allnoconfig kernel?
#endif
unsigned long m68k_machtype;
EXPORT_SYMBOL(m68k_machtype);
unsigned long m68k_cputype;
EXPORT_SYMBOL(m68k_cputype);
unsigned long m68k_fputype;
unsigned long m68k_mmutype;
EXPORT_SYMBOL(m68k_mmutype);
#ifdef CONFIG_VME
unsigned long vme_brdtype;
EXPORT_SYMBOL(vme_brdtype);
#endif
int m68k_is040or060;
EXPORT_SYMBOL(m68k_is040or060);
extern unsigned long availmem;
int m68k_num_memory;
EXPORT_SYMBOL(m68k_num_memory);
int m68k_realnum_memory;
EXPORT_SYMBOL(m68k_realnum_memory);
unsigned long m68k_memoffset;
struct mem_info m68k_memory[NUM_MEMINFO];
EXPORT_SYMBOL(m68k_memory);
struct mem_info m68k_ramdisk;
static char m68k_command_line[CL_SIZE];
void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
/* machine dependent irq functions */
void (*mach_init_IRQ) (void) __initdata = NULL;
void (*mach_get_model) (char *model);
void (*mach_get_hardware_list) (struct seq_file *m);
/* machine dependent timer functions */
unsigned long (*mach_gettimeoffset) (void);
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
int (*mach_set_clock_mmss) (unsigned long);
unsigned int (*mach_get_ss)(void);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
EXPORT_SYMBOL(mach_get_ss);
EXPORT_SYMBOL(mach_get_rtc_pll);
EXPORT_SYMBOL(mach_set_rtc_pll);
void (*mach_reset)( void );
void (*mach_halt)( void );
void (*mach_power_off)( void );
long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int);
EXPORT_SYMBOL(mach_heartbeat);
#endif
#ifdef CONFIG_M68K_L2_CACHE
void (*mach_l2_flush) (int);
#endif
#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
void (*mach_beep)(unsigned int, unsigned int);
EXPORT_SYMBOL(mach_beep);
#endif
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
int isa_type;
int isa_sex;
EXPORT_SYMBOL(isa_type);
EXPORT_SYMBOL(isa_sex);
#endif
extern int amiga_parse_bootinfo(const struct bi_record *);
extern int atari_parse_bootinfo(const struct bi_record *);
extern int mac_parse_bootinfo(const struct bi_record *);
extern int q40_parse_bootinfo(const struct bi_record *);
extern int bvme6000_parse_bootinfo(const struct bi_record *);
extern int mvme16x_parse_bootinfo(const struct bi_record *);
extern int mvme147_parse_bootinfo(const struct bi_record *);
extern int hp300_parse_bootinfo(const struct bi_record *);
extern int apollo_parse_bootinfo(const struct bi_record *);
extern void config_amiga(void);
extern void config_atari(void);
extern void config_mac(void);
extern void config_sun3(void);
extern void config_apollo(void);
extern void config_mvme147(void);
extern void config_mvme16x(void);
extern void config_bvme6000(void);
extern void config_hp300(void);
extern void config_q40(void);
extern void config_sun3x(void);
#define MASK_256K 0xfffc0000
extern void paging_init(void);
static void __init m68k_parse_bootinfo(const struct bi_record *record)
{
while (record->tag != BI_LAST) {
int unknown = 0;
const unsigned long *data = record->data;
switch (record->tag) {
case BI_MACHTYPE:
case BI_CPUTYPE:
case BI_FPUTYPE:
case BI_MMUTYPE:
/* Already set up by head.S */
break;
case BI_MEMCHUNK:
if (m68k_num_memory < NUM_MEMINFO) {
m68k_memory[m68k_num_memory].addr = data[0];
m68k_memory[m68k_num_memory].size = data[1];
m68k_num_memory++;
} else
printk("m68k_parse_bootinfo: too many memory chunks\n");
break;
case BI_RAMDISK:
m68k_ramdisk.addr = data[0];
m68k_ramdisk.size = data[1];
break;
case BI_COMMAND_LINE:
strlcpy(m68k_command_line, (const char *)data,
sizeof(m68k_command_line));
break;
default:
if (MACH_IS_AMIGA)
unknown = amiga_parse_bootinfo(record);
else if (MACH_IS_ATARI)
unknown = atari_parse_bootinfo(record);
else if (MACH_IS_MAC)
unknown = mac_parse_bootinfo(record);
else if (MACH_IS_Q40)
unknown = q40_parse_bootinfo(record);
else if (MACH_IS_BVME6000)
unknown = bvme6000_parse_bootinfo(record);
else if (MACH_IS_MVME16x)
unknown = mvme16x_parse_bootinfo(record);
else if (MACH_IS_MVME147)
unknown = mvme147_parse_bootinfo(record);
else if (MACH_IS_HP300)
unknown = hp300_parse_bootinfo(record);
else if (MACH_IS_APOLLO)
unknown = apollo_parse_bootinfo(record);
else
unknown = 1;
}
if (unknown)
printk("m68k_parse_bootinfo: unknown tag 0x%04x ignored\n",
record->tag);
record = (struct bi_record *)((unsigned long)record +
record->size);
}
m68k_realnum_memory = m68k_num_memory;
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
if (m68k_num_memory > 1) {
printk("Ignoring last %i chunks of physical memory\n",
(m68k_num_memory - 1));
m68k_num_memory = 1;
}
#endif
}
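/*
 * The loop above walks a packed chain of bootinfo records that the
 * bootloader lays down right after the kernel image: each record carries a
 * tag, its total size in bytes and tag-specific data, and the chain is
 * terminated by a BI_LAST tag.  A rough sketch of the layout (the real
 * definition lives in <asm/bootinfo.h>; the _sketch name is made up):
 */
struct bi_record_sketch {
	unsigned short tag;	/* record type, e.g. BI_MEMCHUNK or BI_RAMDISK */
	unsigned short size;	/* total size of this record in bytes */
	unsigned long data[0];	/* tag-dependent payload, e.g. an addr/size pair */
};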
void __init setup_arch(char **cmdline_p)
{
int i;
/* The bootinfo is located right after the kernel bss */
m68k_parse_bootinfo((const struct bi_record *)_end);
if (CPU_IS_040)
m68k_is040or060 = 4;
else if (CPU_IS_060)
m68k_is040or060 = 6;
/* FIXME: m68k_fputype is passed in by Penguin booter, which can
* be confused by software FPU emulation. BEWARE.
* We should really do our own FPU check at startup.
* [what do we do with buggy 68LC040s? if we have problems
* with them, we should add a test to check_bugs() below] */
#ifndef CONFIG_M68KFPU_EMU_ONLY
/* clear the fpu if we have one */
if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
volatile int zero = 0;
asm volatile ("frestore %0" : : "m" (zero));
}
#endif
if (CPU_IS_060) {
u32 pcr;
asm (".chip 68060; movec %%pcr,%0; .chip 68k"
: "=d" (pcr));
if (((pcr >> 8) & 0xff) <= 5) {
printk("Enabling workaround for errata I14\n");
asm (".chip 68060; movec %0,%%pcr; .chip 68k"
: : "d" (pcr | 0x20));
}
}
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
*cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE);
parse_early_param();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
switch (m68k_machtype) {
#ifdef CONFIG_AMIGA
case MACH_AMIGA:
config_amiga();
break;
#endif
#ifdef CONFIG_ATARI
case MACH_ATARI:
config_atari();
break;
#endif
#ifdef CONFIG_MAC
case MACH_MAC:
config_mac();
break;
#endif
#ifdef CONFIG_SUN3
case MACH_SUN3:
config_sun3();
break;
#endif
#ifdef CONFIG_APOLLO
case MACH_APOLLO:
config_apollo();
break;
#endif
#ifdef CONFIG_MVME147
case MACH_MVME147:
config_mvme147();
break;
#endif
#ifdef CONFIG_MVME16x
case MACH_MVME16x:
config_mvme16x();
break;
#endif
#ifdef CONFIG_BVME6000
case MACH_BVME6000:
config_bvme6000();
break;
#endif
#ifdef CONFIG_HP300
case MACH_HP300:
config_hp300();
break;
#endif
#ifdef CONFIG_Q40
case MACH_Q40:
config_q40();
break;
#endif
#ifdef CONFIG_SUN3X
case MACH_SUN3X:
config_sun3x();
break;
#endif
default:
panic("No configuration setup");
}
#ifdef CONFIG_NATFEAT
nf_init();
#endif
paging_init();
#ifndef CONFIG_SUN3
for (i = 1; i < m68k_num_memory; i++)
free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
m68k_memory[i].size);
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
m68k_ramdisk.addr, m68k_ramdisk.size,
BOOTMEM_DEFAULT);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
}
#endif
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
atari_stram_reserve_pages((void *)availmem);
#endif
#ifdef CONFIG_SUN3X
if (MACH_IS_SUN3X) {
dvma_init();
}
#endif
#endif /* !CONFIG_SUN3 */
/* set ISA defs early as possible */
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
if (MACH_IS_Q40) {
isa_type = ISA_TYPE_Q40;
isa_sex = 0;
}
#ifdef CONFIG_AMIGA_PCMCIA
if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)) {
isa_type = ISA_TYPE_AG;
isa_sex = 1;
}
#endif
#endif
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
const char *cpu, *mmu, *fpu;
unsigned long clockfreq, clockfactor;
#define LOOP_CYCLES_68020 (8)
#define LOOP_CYCLES_68030 (8)
#define LOOP_CYCLES_68040 (3)
#define LOOP_CYCLES_68060 (1)
if (CPU_IS_020) {
cpu = "68020";
clockfactor = LOOP_CYCLES_68020;
} else if (CPU_IS_030) {
cpu = "68030";
clockfactor = LOOP_CYCLES_68030;
} else if (CPU_IS_040) {
cpu = "68040";
clockfactor = LOOP_CYCLES_68040;
} else if (CPU_IS_060) {
cpu = "68060";
clockfactor = LOOP_CYCLES_68060;
} else {
cpu = "680x0";
clockfactor = 0;
}
#ifdef CONFIG_M68KFPU_EMU_ONLY
fpu = "none(soft float)";
#else
if (m68k_fputype & FPU_68881)
fpu = "68881";
else if (m68k_fputype & FPU_68882)
fpu = "68882";
else if (m68k_fputype & FPU_68040)
fpu = "68040";
else if (m68k_fputype & FPU_68060)
fpu = "68060";
else if (m68k_fputype & FPU_SUNFPA)
fpu = "Sun FPA";
else
fpu = "none";
#endif
if (m68k_mmutype & MMU_68851)
mmu = "68851";
else if (m68k_mmutype & MMU_68030)
mmu = "68030";
else if (m68k_mmutype & MMU_68040)
mmu = "68040";
else if (m68k_mmutype & MMU_68060)
mmu = "68060";
else if (m68k_mmutype & MMU_SUN3)
mmu = "Sun-3";
else if (m68k_mmutype & MMU_APOLLO)
mmu = "Apollo";
else
mmu = "unknown";
clockfreq = loops_per_jiffy * HZ * clockfactor;
seq_printf(m, "CPU:\t\t%s\n"
"MMU:\t\t%s\n"
"FPU:\t\t%s\n"
"Clocking:\t%lu.%1luMHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n",
cpu, mmu, fpu,
clockfreq/1000000,(clockfreq/100000)%10,
loops_per_jiffy/(500000/HZ),(loops_per_jiffy/(5000/HZ))%100,
loops_per_jiffy);
return 0;
}
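/*
 * Worked example of the arithmetic above (hypothetical figures): a 68030
 * with loops_per_jiffy = 41600 and HZ = 100 gives
 * clockfreq = 41600 * 100 * 8 = 33280000, printed as "33.2MHz", and
 * BogoMips = 41600 / (500000/100) = 8.32.
 */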
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < 1 ? (void *)1 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#ifdef CONFIG_PROC_HARDWARE
static int hardware_proc_show(struct seq_file *m, void *v)
{
char model[80];
unsigned long mem;
int i;
if (mach_get_model)
mach_get_model(model);
else
strcpy(model, "Unknown m68k");
seq_printf(m, "Model:\t\t%s\n", model);
for (mem = 0, i = 0; i < m68k_num_memory; i++)
mem += m68k_memory[i].size;
seq_printf(m, "System Memory:\t%ldK\n", mem >> 10);
if (mach_get_hardware_list)
mach_get_hardware_list(m);
return 0;
}
static int hardware_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, hardware_proc_show, NULL);
}
static const struct file_operations hardware_proc_fops = {
.open = hardware_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init proc_hardware_init(void)
{
proc_create("hardware", 0, NULL, &hardware_proc_fops);
return 0;
}
module_init(proc_hardware_init);
#endif
void check_bugs(void)
{
#ifndef CONFIG_M68KFPU_EMU
if (m68k_fputype == 0) {
printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
"WHICH IS REQUIRED BY LINUX/M68K ***\n");
printk(KERN_EMERG "Upgrade your hardware or join the FPU "
"emulation project\n");
panic("no FPU");
}
#endif /* !CONFIG_M68KFPU_EMU */
}
#ifdef CONFIG_ADB
static int __init adb_probe_sync_enable (char *str) {
extern int __adb_probe_sync;
__adb_probe_sync = 1;
return 1;
}
__setup("adb_sync", adb_probe_sync_enable);
#endif /* CONFIG_ADB */
/*
* linux/arch/m68k/kernel/setup.c
*
* Copyright (C) 1995 Hamish Macdonald
*/
/*
* This file handles the architecture-dependent parts of system setup
*/
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/console.h>
#include <linux/genhd.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/initrd.h>
#include <asm/bootinfo.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/irq.h>
#include <asm/io.h>
#include <asm/machdep.h>
#ifdef CONFIG_AMIGA
#include <asm/amigahw.h>
#endif
#ifdef CONFIG_ATARI
#include <asm/atarihw.h>
#include <asm/atari_stram.h>
#endif
#ifdef CONFIG_SUN3X
#include <asm/dvma.h>
#endif
#include <asm/natfeat.h>
#if !FPSTATESIZE || !NR_IRQS
#warning No CPU/platform type selected, your kernel will not work!
#warning Are you building an allnoconfig kernel?
#endif
unsigned long m68k_machtype;
EXPORT_SYMBOL(m68k_machtype);
unsigned long m68k_cputype;
EXPORT_SYMBOL(m68k_cputype);
unsigned long m68k_fputype;
unsigned long m68k_mmutype;
EXPORT_SYMBOL(m68k_mmutype);
#ifdef CONFIG_VME
unsigned long vme_brdtype;
EXPORT_SYMBOL(vme_brdtype);
#endif
int m68k_is040or060;
EXPORT_SYMBOL(m68k_is040or060);
extern unsigned long availmem;
int m68k_num_memory;
EXPORT_SYMBOL(m68k_num_memory);
int m68k_realnum_memory;
EXPORT_SYMBOL(m68k_realnum_memory);
unsigned long m68k_memoffset;
struct mem_info m68k_memory[NUM_MEMINFO];
EXPORT_SYMBOL(m68k_memory);
struct mem_info m68k_ramdisk;
static char m68k_command_line[CL_SIZE];
void (*mach_sched_init) (irq_handler_t handler) __initdata = NULL;
/* machine dependent irq functions */
void (*mach_init_IRQ) (void) __initdata = NULL;
void (*mach_get_model) (char *model);
void (*mach_get_hardware_list) (struct seq_file *m);
/* machine dependent timer functions */
unsigned long (*mach_gettimeoffset) (void);
int (*mach_hwclk) (int, struct rtc_time*);
EXPORT_SYMBOL(mach_hwclk);
int (*mach_set_clock_mmss) (unsigned long);
unsigned int (*mach_get_ss)(void);
int (*mach_get_rtc_pll)(struct rtc_pll_info *);
int (*mach_set_rtc_pll)(struct rtc_pll_info *);
EXPORT_SYMBOL(mach_get_ss);
EXPORT_SYMBOL(mach_get_rtc_pll);
EXPORT_SYMBOL(mach_set_rtc_pll);
void (*mach_reset)( void );
void (*mach_halt)( void );
void (*mach_power_off)( void );
long mach_max_dma_address = 0x00ffffff; /* default set to the lower 16MB */
#ifdef CONFIG_HEARTBEAT
void (*mach_heartbeat) (int);
EXPORT_SYMBOL(mach_heartbeat);
#endif
#ifdef CONFIG_M68K_L2_CACHE
void (*mach_l2_flush) (int);
#endif
#if defined(CONFIG_INPUT_M68K_BEEP) || defined(CONFIG_INPUT_M68K_BEEP_MODULE)
void (*mach_beep)(unsigned int, unsigned int);
EXPORT_SYMBOL(mach_beep);
#endif
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
int isa_type;
int isa_sex;
EXPORT_SYMBOL(isa_type);
EXPORT_SYMBOL(isa_sex);
#endif
extern int amiga_parse_bootinfo(const struct bi_record *);
extern int atari_parse_bootinfo(const struct bi_record *);
extern int mac_parse_bootinfo(const struct bi_record *);
extern int q40_parse_bootinfo(const struct bi_record *);
extern int bvme6000_parse_bootinfo(const struct bi_record *);
extern int mvme16x_parse_bootinfo(const struct bi_record *);
extern int mvme147_parse_bootinfo(const struct bi_record *);
extern int hp300_parse_bootinfo(const struct bi_record *);
extern int apollo_parse_bootinfo(const struct bi_record *);
extern void config_amiga(void);
extern void config_atari(void);
extern void config_mac(void);
extern void config_sun3(void);
extern void config_apollo(void);
extern void config_mvme147(void);
extern void config_mvme16x(void);
extern void config_bvme6000(void);
extern void config_hp300(void);
extern void config_q40(void);
extern void config_sun3x(void);
#define MASK_256K 0xfffc0000
extern void paging_init(void);
static void __init m68k_parse_bootinfo(const struct bi_record *record)
{
while (record->tag != BI_LAST) {
int unknown = 0;
const unsigned long *data = record->data;
switch (record->tag) {
case BI_MACHTYPE:
case BI_CPUTYPE:
case BI_FPUTYPE:
case BI_MMUTYPE:
/* Already set up by head.S */
break;
case BI_MEMCHUNK:
if (m68k_num_memory < NUM_MEMINFO) {
m68k_memory[m68k_num_memory].addr = data[0];
m68k_memory[m68k_num_memory].size = data[1];
m68k_num_memory++;
} else
printk("m68k_parse_bootinfo: too many memory chunks\n");
break;
case BI_RAMDISK:
m68k_ramdisk.addr = data[0];
m68k_ramdisk.size = data[1];
break;
case BI_COMMAND_LINE:
strlcpy(m68k_command_line, (const char *)data,
sizeof(m68k_command_line));
break;
default:
if (MACH_IS_AMIGA)
unknown = amiga_parse_bootinfo(record);
else if (MACH_IS_ATARI)
unknown = atari_parse_bootinfo(record);
else if (MACH_IS_MAC)
unknown = mac_parse_bootinfo(record);
else if (MACH_IS_Q40)
unknown = q40_parse_bootinfo(record);
else if (MACH_IS_BVME6000)
unknown = bvme6000_parse_bootinfo(record);
else if (MACH_IS_MVME16x)
unknown = mvme16x_parse_bootinfo(record);
else if (MACH_IS_MVME147)
unknown = mvme147_parse_bootinfo(record);
else if (MACH_IS_HP300)
unknown = hp300_parse_bootinfo(record);
else if (MACH_IS_APOLLO)
unknown = apollo_parse_bootinfo(record);
else
unknown = 1;
}
if (unknown)
printk("m68k_parse_bootinfo: unknown tag 0x%04x ignored\n",
record->tag);
record = (struct bi_record *)((unsigned long)record +
record->size);
}
m68k_realnum_memory = m68k_num_memory;
#ifdef CONFIG_SINGLE_MEMORY_CHUNK
if (m68k_num_memory > 1) {
printk("Ignoring last %i chunks of physical memory\n",
(m68k_num_memory - 1));
m68k_num_memory = 1;
}
#endif
}
void __init setup_arch(char **cmdline_p)
{
int i;
/* The bootinfo is located right after the kernel bss */
m68k_parse_bootinfo((const struct bi_record *)_end);
if (CPU_IS_040)
m68k_is040or060 = 4;
else if (CPU_IS_060)
m68k_is040or060 = 6;
/* FIXME: m68k_fputype is passed in by Penguin booter, which can
* be confused by software FPU emulation. BEWARE.
* We should really do our own FPU check at startup.
* [what do we do with buggy 68LC040s? if we have problems
* with them, we should add a test to check_bugs() below] */
#ifndef CONFIG_M68KFPU_EMU_ONLY
/* clear the fpu if we have one */
if (m68k_fputype & (FPU_68881|FPU_68882|FPU_68040|FPU_68060)) {
volatile int zero = 0;
asm volatile ("frestore %0" : : "m" (zero));
}
#endif
if (CPU_IS_060) {
u32 pcr;
asm (".chip 68060; movec %%pcr,%0; .chip 68k"
: "=d" (pcr));
if (((pcr >> 8) & 0xff) <= 5) {
printk("Enabling workaround for errata I14\n");
asm (".chip 68060; movec %0,%%pcr; .chip 68k"
: : "d" (pcr | 0x20));
}
}
init_mm.start_code = PAGE_OFFSET;
init_mm.end_code = (unsigned long)_etext;
init_mm.end_data = (unsigned long)_edata;
init_mm.brk = (unsigned long)_end;
*cmdline_p = m68k_command_line;
memcpy(boot_command_line, *cmdline_p, CL_SIZE);
parse_early_param();
#ifdef CONFIG_DUMMY_CONSOLE
conswitchp = &dummy_con;
#endif
switch (m68k_machtype) {
#ifdef CONFIG_AMIGA
case MACH_AMIGA:
config_amiga();
break;
#endif
#ifdef CONFIG_ATARI
case MACH_ATARI:
config_atari();
break;
#endif
#ifdef CONFIG_MAC
case MACH_MAC:
config_mac();
break;
#endif
#ifdef CONFIG_SUN3
case MACH_SUN3:
config_sun3();
break;
#endif
#ifdef CONFIG_APOLLO
case MACH_APOLLO:
config_apollo();
break;
#endif
#ifdef CONFIG_MVME147
case MACH_MVME147:
config_mvme147();
break;
#endif
#ifdef CONFIG_MVME16x
case MACH_MVME16x:
config_mvme16x();
break;
#endif
#ifdef CONFIG_BVME6000
case MACH_BVME6000:
config_bvme6000();
break;
#endif
#ifdef CONFIG_HP300
case MACH_HP300:
config_hp300();
break;
#endif
#ifdef CONFIG_Q40
case MACH_Q40:
config_q40();
break;
#endif
#ifdef CONFIG_SUN3X
case MACH_SUN3X:
config_sun3x();
break;
#endif
default:
panic("No configuration setup");
}
#ifdef CONFIG_NATFEAT
nf_init();
#endif
paging_init();
#ifndef CONFIG_SUN3
for (i = 1; i < m68k_num_memory; i++)
free_bootmem_node(NODE_DATA(i), m68k_memory[i].addr,
m68k_memory[i].size);
#ifdef CONFIG_BLK_DEV_INITRD
if (m68k_ramdisk.size) {
reserve_bootmem_node(__virt_to_node(phys_to_virt(m68k_ramdisk.addr)),
m68k_ramdisk.addr, m68k_ramdisk.size,
BOOTMEM_DEFAULT);
initrd_start = (unsigned long)phys_to_virt(m68k_ramdisk.addr);
initrd_end = initrd_start + m68k_ramdisk.size;
printk("initrd: %08lx - %08lx\n", initrd_start, initrd_end);
}
#endif
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
atari_stram_reserve_pages((void *)availmem);
#endif
#ifdef CONFIG_SUN3X
if (MACH_IS_SUN3X) {
dvma_init();
}
#endif
#endif /* !CONFIG_SUN3 */
/* set ISA defs early as possible */
#if defined(CONFIG_ISA) && defined(MULTI_ISA)
if (MACH_IS_Q40) {
isa_type = ISA_TYPE_Q40;
isa_sex = 0;
}
#ifdef CONFIG_AMIGA_PCMCIA
if (MACH_IS_AMIGA && AMIGAHW_PRESENT(PCMCIA)) {
isa_type = ISA_TYPE_AG;
isa_sex = 1;
}
#endif
#endif
}
static int show_cpuinfo(struct seq_file *m, void *v)
{
const char *cpu, *mmu, *fpu;
unsigned long clockfreq, clockfactor;
#define LOOP_CYCLES_68020 (8)
#define LOOP_CYCLES_68030 (8)
#define LOOP_CYCLES_68040 (3)
#define LOOP_CYCLES_68060 (1)
if (CPU_IS_020) {
cpu = "68020";
clockfactor = LOOP_CYCLES_68020;
} else if (CPU_IS_030) {
cpu = "68030";
clockfactor = LOOP_CYCLES_68030;
} else if (CPU_IS_040) {
cpu = "68040";
clockfactor = LOOP_CYCLES_68040;
} else if (CPU_IS_060) {
cpu = "68060";
clockfactor = LOOP_CYCLES_68060;
} else {
cpu = "680x0";
clockfactor = 0;
}
#ifdef CONFIG_M68KFPU_EMU_ONLY
fpu = "none(soft float)";
#else
if (m68k_fputype & FPU_68881)
fpu = "68881";
else if (m68k_fputype & FPU_68882)
fpu = "68882";
else if (m68k_fputype & FPU_68040)
fpu = "68040";
else if (m68k_fputype & FPU_68060)
fpu = "68060";
else if (m68k_fputype & FPU_SUNFPA)
fpu = "Sun FPA";
else
fpu = "none";
#endif
if (m68k_mmutype & MMU_68851)
mmu = "68851";
else if (m68k_mmutype & MMU_68030)
mmu = "68030";
else if (m68k_mmutype & MMU_68040)
mmu = "68040";
else if (m68k_mmutype & MMU_68060)
mmu = "68060";
else if (m68k_mmutype & MMU_SUN3)
mmu = "Sun-3";
else if (m68k_mmutype & MMU_APOLLO)
mmu = "Apollo";
else
mmu = "unknown";
clockfreq = loops_per_jiffy * HZ * clockfactor;
seq_printf(m, "CPU:\t\t%s\n"
"MMU:\t\t%s\n"
"FPU:\t\t%s\n"
"Clocking:\t%lu.%1luMHz\n"
"BogoMips:\t%lu.%02lu\n"
"Calibration:\t%lu loops\n",
cpu, mmu, fpu,
clockfreq/1000000,(clockfreq/100000)%10,
loops_per_jiffy/(500000/HZ),(loops_per_jiffy/(5000/HZ))%100,
loops_per_jiffy);
return 0;
}
static void *c_start(struct seq_file *m, loff_t *pos)
{
return *pos < 1 ? (void *)1 : NULL;
}
static void *c_next(struct seq_file *m, void *v, loff_t *pos)
{
++*pos;
return NULL;
}
static void c_stop(struct seq_file *m, void *v)
{
}
const struct seq_operations cpuinfo_op = {
.start = c_start,
.next = c_next,
.stop = c_stop,
.show = show_cpuinfo,
};
#ifdef CONFIG_PROC_HARDWARE
static int hardware_proc_show(struct seq_file *m, void *v)
{
char model[80];
unsigned long mem;
int i;
if (mach_get_model)
mach_get_model(model);
else
strcpy(model, "Unknown m68k");
seq_printf(m, "Model:\t\t%s\n", model);
for (mem = 0, i = 0; i < m68k_num_memory; i++)
mem += m68k_memory[i].size;
seq_printf(m, "System Memory:\t%ldK\n", mem >> 10);
if (mach_get_hardware_list)
mach_get_hardware_list(m);
return 0;
}
static int hardware_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, hardware_proc_show, NULL);
}
static const struct file_operations hardware_proc_fops = {
.open = hardware_proc_open,
.read = seq_read,
.llseek = seq_lseek,
.release = single_release,
};
static int __init proc_hardware_init(void)
{
proc_create("hardware", 0, NULL, &hardware_proc_fops);
return 0;
}
module_init(proc_hardware_init);
#endif
void check_bugs(void)
{
#ifndef CONFIG_M68KFPU_EMU
if (m68k_fputype == 0) {
printk(KERN_EMERG "*** YOU DO NOT HAVE A FLOATING POINT UNIT, "
"WHICH IS REQUIRED BY LINUX/M68K ***\n");
printk(KERN_EMERG "Upgrade your hardware or join the FPU "
"emulation project\n");
panic("no FPU");
}
#endif /* !CONFIG_M68KFPU_EMU */
}
#ifdef CONFIG_ADB
static int __init adb_probe_sync_enable (char *str) {
extern int __adb_probe_sync;
__adb_probe_sync = 1;
return 1;
}
__setup("adb_sync", adb_probe_sync_enable);
#endif /* CONFIG_ADB */
#ifdef CONFIG_MMU
#include "signal_mm.c"
/*
* linux/arch/m68k/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*
* 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
*
* mathemu support by Roman Zippel
* (Note: fpstate in the signal context is completely ignored for the emulator
* and the internal floating point format is put on stack)
*/
/*
* ++roman (07/09/96): implemented signal stacks (specially for tosemu on
* Atari :-) Current limitation: Only one sigstack can be active at one time.
* If a second signal with SA_ONSTACK set arrives while working on a sigstack,
* SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
* signal handlers!
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/module.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
static const int frame_extra_sizes[16] = {
[1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
[2] = sizeof(((struct frame *)0)->un.fmt2),
[3] = sizeof(((struct frame *)0)->un.fmt3),
[4] = sizeof(((struct frame *)0)->un.fmt4),
[5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
[6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
[7] = sizeof(((struct frame *)0)->un.fmt7),
[8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */
[9] = sizeof(((struct frame *)0)->un.fmt9),
[10] = sizeof(((struct frame *)0)->un.fmta),
[11] = sizeof(((struct frame *)0)->un.fmtb),
[12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */
[13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */
[14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */
[15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};
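/*
 * The table index is the 680x0 exception stack-frame format nibble
 * (formatvec >> 12, see mangle_kernel_stack() below); the value is how many
 * bytes that format carries beyond the basic four-word frame, and -1 marks
 * formats that signal delivery should never encounter.
 */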
int handle_kernel_fault(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
struct pt_regs *tregs;
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->pc);
if (!fixup)
return 0;
/* Create a new four word stack frame, discarding the old one. */
regs->stkadj = frame_extra_sizes[regs->format];
tregs = (struct pt_regs *)((long)regs + regs->stkadj);
tregs->vector = regs->vector;
tregs->format = 0;
tregs->pc = fixup->fixup;
tregs->sr = regs->sr;
return 1;
}
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int
sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
{
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_restore_sigmask();
return -ERESTARTNOHAND;
}
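/*
 * The "atomically swap mask and wait" semantic above is what makes the
 * classic block/check/wait pattern race-free in userspace.  A standalone
 * sketch (a separate userspace program, not part of this file):
 */
#include <signal.h>
#include <stdio.h>
#include <unistd.h>

static volatile sig_atomic_t got_usr1;

static void on_usr1(int sig)
{
	got_usr1 = 1;
}

int main(void)
{
	sigset_t block, old;
	struct sigaction sa;

	sa.sa_handler = on_usr1;
	sa.sa_flags = 0;
	sigemptyset(&sa.sa_mask);
	sigaction(SIGUSR1, &sa, NULL);
	sigemptyset(&block);
	sigaddset(&block, SIGUSR1);
	sigprocmask(SIG_BLOCK, &block, &old);	/* hold SIGUSR1 pending */
	printf("pid %d waiting for SIGUSR1\n", (int)getpid());
	while (!got_usr1)
		sigsuspend(&old);		/* unblock and sleep atomically */
	printf("got it\n");
	return 0;
}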
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
}
return ret;
}
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
return do_sigaltstack(uss, uoss, rdusp());
}
/*
* Do a signal return; undo the signal stack.
*
* Keep the return code on the stack quadword aligned!
* That makes the cache flush below easier.
*/
struct sigframe
{
char __user *pretcode;
int sig;
int code;
struct sigcontext __user *psc;
char retcode[8];
unsigned long extramask[_NSIG_WORDS-1];
struct sigcontext sc;
};
struct rt_sigframe
{
char __user *pretcode;
int sig;
struct siginfo __user *pinfo;
void __user *puc;
char retcode[8];
struct siginfo info;
struct ucontext uc;
};
static unsigned char fpu_version; /* version number of fpu, set by setup_frame */
static inline int restore_fpu_state(struct sigcontext *sc)
{
int err = 1;
if (FPU_IS_EMU) {
/* restore registers */
memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
memcpy(current->thread.fp, sc->sc_fpregs, 24);
return 0;
}
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
/* Verify the frame format. */
if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
goto out;
if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 &&
!(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
goto out;
if (m68k_fputype & FPU_68882 &&
!(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
goto out;
} else if (CPU_IS_040) {
if (!(sc->sc_fpstate[1] == 0x00 ||
sc->sc_fpstate[1] == 0x28 ||
sc->sc_fpstate[1] == 0x60))
goto out;
} else if (CPU_IS_060) {
if (!(sc->sc_fpstate[3] == 0x00 ||
sc->sc_fpstate[3] == 0x60 ||
sc->sc_fpstate[3] == 0xe0))
goto out;
} else
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp1\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
}
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k" : : "m" (*sc->sc_fpstate));
err = 0;
out:
return err;
}
#define FPCONTEXT_SIZE 216
#define uc_fpstate uc_filler[0]
#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0;
fpregset_t fpregs;
int err = 1;
if (FPU_IS_EMU) {
/* restore fpu control register */
if (__copy_from_user(current->thread.fpcntl,
uc->uc_mcontext.fpregs.f_fpcntl, 12))
goto out;
/* restore all other fpu register */
if (__copy_from_user(current->thread.fp,
uc->uc_mcontext.fpregs.f_fpregs, 96))
goto out;
return 0;
}
if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
goto out;
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
if (!CPU_IS_060)
context_size = fpstate[1];
/* Verify the frame format. */
if (!CPU_IS_060 && (fpstate[0] != fpu_version))
goto out;
if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 &&
!(context_size == 0x18 || context_size == 0xb4))
goto out;
if (m68k_fputype & FPU_68882 &&
!(context_size == 0x38 || context_size == 0xd4))
goto out;
} else if (CPU_IS_040) {
if (!(context_size == 0x00 ||
context_size == 0x28 ||
context_size == 0x60))
goto out;
} else if (CPU_IS_060) {
if (!(fpstate[3] == 0x00 ||
fpstate[3] == 0x60 ||
fpstate[3] == 0xe0))
goto out;
} else
goto out;
if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
sizeof(fpregs)))
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp7\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*fpregs.f_fpregs),
"m" (*fpregs.f_fpcntl));
}
if (context_size &&
__copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
context_size))
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k" : : "m" (*fpstate));
err = 0;
out:
return err;
}
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
void __user *fp)
{
int fsize = frame_extra_sizes[formatvec >> 12];
if (fsize < 0) {
/*
* user process trying to return with weird frame format
*/
#ifdef DEBUG
printk("user process returning with weird frame format\n");
#endif
return 1;
}
if (!fsize) {
regs->format = formatvec >> 12;
regs->vector = formatvec & 0xfff;
} else {
struct switch_stack *sw = (struct switch_stack *)regs - 1;
unsigned long buf[fsize / 2]; /* yes, twice as much */
/* that'll make sure that expansion won't crap over data */
if (copy_from_user(buf + fsize / 4, fp, fsize))
return 1;
/* point of no return */
regs->format = formatvec >> 12;
regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
__asm__ __volatile__
(" movel %0,%/a0\n\t"
" subl %1,%/a0\n\t" /* make room on stack */
" movel %/a0,%/sp\n\t" /* set stack pointer */
/* move switch_stack and pt_regs */
"1: movel %0@+,%/a0@+\n\t"
" dbra %2,1b\n\t"
" lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
" lsrl #2,%1\n\t"
" subql #1,%1\n\t"
/* copy to the gap we'd made */
"2: movel %4@+,%/a0@+\n\t"
" dbra %1,2b\n\t"
" bral ret_from_signal\n"
: /* no outputs, it doesn't ever return */
: "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
"n" (frame_offset), "a" (buf + fsize/4)
: "a0");
#undef frame_offset
}
return 0;
}
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
int formatvec;
struct sigcontext context;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* get previous context */
if (copy_from_user(&context, usc, sizeof(context)))
goto badframe;
/* restore passed registers */
regs->d0 = context.sc_d0;
regs->d1 = context.sc_d1;
regs->a0 = context.sc_a0;
regs->a1 = context.sc_a1;
regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
regs->pc = context.sc_pc;
regs->orig_d0 = -1; /* disable syscall checks */
wrusp(context.sc_usp);
formatvec = context.sc_formatvec;
err = restore_fpu_state(&context);
if (err || mangle_kernel_stack(regs, formatvec, fp))
goto badframe;
return 0;
badframe:
return 1;
}
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
struct ucontext __user *uc)
{
int temp;
greg_t __user *gregs = uc->uc_mcontext.gregs;
unsigned long usp;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
goto badframe;
/* restore passed registers */
err |= __get_user(regs->d0, &gregs[0]);
err |= __get_user(regs->d1, &gregs[1]);
err |= __get_user(regs->d2, &gregs[2]);
err |= __get_user(regs->d3, &gregs[3]);
err |= __get_user(regs->d4, &gregs[4]);
err |= __get_user(regs->d5, &gregs[5]);
err |= __get_user(sw->d6, &gregs[6]);
err |= __get_user(sw->d7, &gregs[7]);
err |= __get_user(regs->a0, &gregs[8]);
err |= __get_user(regs->a1, &gregs[9]);
err |= __get_user(regs->a2, &gregs[10]);
err |= __get_user(sw->a3, &gregs[11]);
err |= __get_user(sw->a4, &gregs[12]);
err |= __get_user(sw->a5, &gregs[13]);
err |= __get_user(sw->a6, &gregs[14]);
err |= __get_user(usp, &gregs[15]);
wrusp(usp);
err |= __get_user(regs->pc, &gregs[16]);
err |= __get_user(temp, &gregs[17]);
regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
regs->orig_d0 = -1; /* disable syscall checks */
err |= __get_user(temp, &uc->uc_formatvec);
err |= rt_restore_fpu_state(uc);
if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
goto badframe;
if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
goto badframe;
return 0;
badframe:
return 1;
}
asmlinkage int do_sigreturn(unsigned long __unused)
{
struct switch_stack *sw = (struct switch_stack *) &__unused;
struct pt_regs *regs = (struct pt_regs *) (sw + 1);
unsigned long usp = rdusp();
struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
sigset_t set;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
(_NSIG_WORDS > 1 &&
__copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
current->blocked = set;
recalc_sigpending();
if (restore_sigcontext(regs, &frame->sc, frame + 1))
goto badframe;
return regs->d0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
struct switch_stack *sw = (struct switch_stack *) &__unused;
struct pt_regs *regs = (struct pt_regs *) (sw + 1);
unsigned long usp = rdusp();
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
sigset_t set;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
current->blocked = set;
recalc_sigpending();
if (rt_restore_ucontext(regs, sw, &frame->uc))
goto badframe;
return regs->d0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
/*
* Set up a signal frame.
*/
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
if (FPU_IS_EMU) {
/* save registers */
memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
memcpy(sc->sc_fpregs, current->thread.fp, 24);
return;
}
__asm__ volatile (".chip 68k/68881\n\t"
"fsave %0\n\t"
".chip 68k"
: : "m" (*sc->sc_fpstate) : "memory");
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
fpu_version = sc->sc_fpstate[0];
if (CPU_IS_020_OR_030 &&
regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */
if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
sc->sc_fpstate[0x38] |= 1 << 3;
}
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp1,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: "=m" (*sc->sc_fpregs),
"=m" (*sc->sc_fpcntl)
: /* no inputs */
: "memory");
}
}
static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0;
int err = 0;
if (FPU_IS_EMU) {
/* save fpu control register */
err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
current->thread.fpcntl, 12);
/* save all other fpu register */
err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
current->thread.fp, 96);
return err;
}
__asm__ volatile (".chip 68k/68881\n\t"
"fsave %0\n\t"
".chip 68k"
: : "m" (*fpstate) : "memory");
err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
fpregset_t fpregs;
if (!CPU_IS_060)
context_size = fpstate[1];
fpu_version = fpstate[0];
if (CPU_IS_020_OR_030 &&
regs->vector >= (VEC_FPBRUC * 4) &&
regs->vector <= (VEC_FPNAN * 4)) {
/* Clear pending exception in 68882 idle frame */
if (*(unsigned short *) fpstate == 0x1f38)
fpstate[0x38] |= 1 << 3;
}
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %%fp0-%%fp7,%0\n\t"
"fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
".chip 68k"
: "=m" (*fpregs.f_fpregs),
"=m" (*fpregs.f_fpcntl)
: /* no inputs */
: "memory");
err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
sizeof(fpregs));
}
if (context_size)
err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
context_size);
return err;
}
static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
unsigned long mask)
{
sc->sc_mask = mask;
sc->sc_usp = rdusp();
sc->sc_d0 = regs->d0;
sc->sc_d1 = regs->d1;
sc->sc_a0 = regs->a0;
sc->sc_a1 = regs->a1;
sc->sc_sr = regs->sr;
sc->sc_pc = regs->pc;
sc->sc_formatvec = regs->format << 12 | regs->vector;
save_fpu_state(sc, regs);
}
static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
struct switch_stack *sw = (struct switch_stack *)regs - 1;
greg_t __user *gregs = uc->uc_mcontext.gregs;
int err = 0;
err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
err |= __put_user(regs->d0, &gregs[0]);
err |= __put_user(regs->d1, &gregs[1]);
err |= __put_user(regs->d2, &gregs[2]);
err |= __put_user(regs->d3, &gregs[3]);
err |= __put_user(regs->d4, &gregs[4]);
err |= __put_user(regs->d5, &gregs[5]);
err |= __put_user(sw->d6, &gregs[6]);
err |= __put_user(sw->d7, &gregs[7]);
err |= __put_user(regs->a0, &gregs[8]);
err |= __put_user(regs->a1, &gregs[9]);
err |= __put_user(regs->a2, &gregs[10]);
err |= __put_user(sw->a3, &gregs[11]);
err |= __put_user(sw->a4, &gregs[12]);
err |= __put_user(sw->a5, &gregs[13]);
err |= __put_user(sw->a6, &gregs[14]);
err |= __put_user(rdusp(), &gregs[15]);
err |= __put_user(regs->pc, &gregs[16]);
err |= __put_user(regs->sr, &gregs[17]);
err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
err |= rt_save_fpu_state(uc, regs);
return err;
}
static inline void push_cache (unsigned long vaddr)
{
/*
* Using the old cache_push_v() was really a big waste.
*
* What we are trying to do is to flush 8 bytes to ram.
* Flushing 2 cache lines of 16 bytes is much cheaper than
* flushing 1 or 2 pages, as previously done in
* cache_push_v().
* Jes
*/
if (CPU_IS_040) {
unsigned long temp;
__asm__ __volatile__ (".chip 68040\n\t"
"nop\n\t"
"ptestr (%1)\n\t"
"movec %%mmusr,%0\n\t"
".chip 68k"
: "=r" (temp)
: "a" (vaddr));
temp &= PAGE_MASK;
temp |= vaddr & ~PAGE_MASK;
__asm__ __volatile__ (".chip 68040\n\t"
"nop\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (temp));
}
else if (CPU_IS_060) {
unsigned long temp;
__asm__ __volatile__ (".chip 68060\n\t"
"plpar (%0)\n\t"
".chip 68k"
: "=a" (temp)
: "0" (vaddr));
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (temp));
}
else {
/*
* 68030/68020 have no writeback cache;
* still need to clear icache.
* Note that vaddr is guaranteed to be long word aligned.
*/
unsigned long temp;
asm volatile ("movec %%cacr,%0" : "=r" (temp));
temp += 4;
asm volatile ("movec %0,%%caar\n\t"
"movec %1,%%cacr"
: : "r" (vaddr), "r" (temp));
asm volatile ("movec %0,%%caar\n\t"
"movec %1,%%cacr"
: : "r" (vaddr + 4), "r" (temp));
}
}
static inline void __user *
get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size)
{
unsigned long usp;
/* Default to using normal stack. */
usp = rdusp();
/* This is the X/Open sanctioned signal stack switching. */
if (ka->sa.sa_flags & SA_ONSTACK) {
if (!sas_ss_flags(usp))
usp = current->sas_ss_sp + current->sas_ss_size;
}
return (void __user *)((usp - frame_size) & -8UL);
}
static int setup_frame (int sig, struct k_sigaction *ka,
sigset_t *set, struct pt_regs *regs)
{
struct sigframe __user *frame;
int fsize = frame_extra_sizes[regs->format];
struct sigcontext context;
int err = 0;
if (fsize < 0) {
#ifdef DEBUG
printk ("setup_frame: Unknown frame format %#x\n",
regs->format);
#endif
goto give_sigsegv;
}
frame = get_sigframe(ka, regs, sizeof(*frame) + fsize);
if (fsize)
err |= copy_to_user (frame + 1, regs + 1, fsize);
err |= __put_user((current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig),
&frame->sig);
err |= __put_user(regs->vector, &frame->code);
err |= __put_user(&frame->sc, &frame->psc);
if (_NSIG_WORDS > 1)
err |= copy_to_user(frame->extramask, &set->sig[1],
sizeof(frame->extramask));
setup_sigcontext(&context, regs, set->sig[0]);
err |= copy_to_user (&frame->sc, &context, sizeof(context));
/* Set up to return from userspace. */
err |= __put_user(frame->retcode, &frame->pretcode);
/* moveq #,d0; trap #0 */
err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
(long __user *)(frame->retcode));
if (err)
goto give_sigsegv;
push_cache ((unsigned long) &frame->retcode);
/*
* Set up registers for signal handler. All the state we are about
* to destroy is successfully copied to sigframe.
*/
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
/*
* This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj.
*/
if (fsize)
regs->stkadj = fsize;
/* Prepare to skip over the extra stuff in the exception frame. */
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
handle overlaps. */
tregs->vector = 0;
tregs->format = 0;
tregs->pc = regs->pc;
tregs->sr = regs->sr;
}
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return err;
}
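/*
* Illustrative sketch (not used by the code above): the constant stored
* into retcode packs two 16-bit m68k opcodes into one 32-bit word.  The
* high word 0x70nn is "moveq #nn,%d0" and the low word 0x4e40 is
* "trap #0", so the trampoline simply loads the sigreturn syscall number
* (which fits in moveq's signed 8-bit immediate) and traps back into
* the kernel.
*/
static inline unsigned long sigreturn_trampoline_insn(unsigned char nr)
{
	/* moveq #nr,%d0 ; trap #0 */
	return 0x70004e40 + ((unsigned long)nr << 16);
}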
static int setup_rt_frame (int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *set, struct pt_regs *regs)
{
struct rt_sigframe __user *frame;
int fsize = frame_extra_sizes[regs->format];
int err = 0;
if (fsize < 0) {
#ifdef DEBUG
printk ("setup_frame: Unknown frame format %#x\n",
regs->format);
#endif
goto give_sigsegv;
}
frame = get_sigframe(ka, regs, sizeof(*frame));
if (fsize)
err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);
err |= __put_user((current_thread_info()->exec_domain
&& current_thread_info()->exec_domain->signal_invmap
&& sig < 32
? current_thread_info()->exec_domain->signal_invmap[sig]
: sig),
&frame->sig);
err |= __put_user(&frame->info, &frame->pinfo);
err |= __put_user(&frame->uc, &frame->puc);
err |= copy_siginfo_to_user(&frame->info, info);
/* Create the ucontext. */
err |= __put_user(0, &frame->uc.uc_flags);
err |= __put_user(NULL, &frame->uc.uc_link);
err |= __put_user((void __user *)current->sas_ss_sp,
&frame->uc.uc_stack.ss_sp);
err |= __put_user(sas_ss_flags(rdusp()),
&frame->uc.uc_stack.ss_flags);
err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
err |= rt_setup_ucontext(&frame->uc, regs);
err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));
/* Set up to return from userspace. */
err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
/* movel #__NR_rt_sigreturn,d0; trap #0 */
err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
(long __user *)(frame->retcode + 4));
#else
/* moveq #,d0; notb d0; trap #0 */
err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
(long __user *)(frame->retcode + 0));
err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
#endif
if (err)
goto give_sigsegv;
push_cache ((unsigned long) &frame->retcode);
/*
* Set up registers for signal handler. All the state we are about
* to destroy is successfully copied to sigframe.
*/
wrusp ((unsigned long) frame);
regs->pc = (unsigned long) ka->sa.sa_handler;
/*
* This is subtle; if we build more than one sigframe, all but the
* first one will see frame format 0 and have fsize == 0, so we won't
* screw stkadj.
*/
if (fsize)
regs->stkadj = fsize;
/* Prepare to skip over the extra stuff in the exception frame. */
if (regs->stkadj) {
struct pt_regs *tregs =
(struct pt_regs *)((ulong)regs + regs->stkadj);
#ifdef DEBUG
printk("Performing stackadjust=%04x\n", regs->stkadj);
#endif
/* This must be copied with decreasing addresses to
handle overlaps. */
tregs->vector = 0;
tregs->format = 0;
tregs->pc = regs->pc;
tregs->sr = regs->sr;
}
return 0;
give_sigsegv:
force_sigsegv(sig, current);
return err;
}
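/*
* Decoding of the non-ColdFire trampoline above (informational): moveq
* sign-extends its 8-bit immediate, so syscall numbers above 127 cannot
* be loaded directly; the code therefore stores the byte-complemented
* number and flips it back in place:
*
*   0x70nn  moveq #(__NR_rt_sigreturn ^ 0xff),%d0
*   0x4600  not.b %d0       (%d0 now holds __NR_rt_sigreturn)
*   0x4e40  trap #0
*/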
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
switch (regs->d0) {
case -ERESTARTNOHAND:
if (!has_handler)
goto do_restart;
regs->d0 = -EINTR;
break;
case -ERESTART_RESTARTBLOCK:
if (!has_handler) {
regs->d0 = __NR_restart_syscall;
regs->pc -= 2;
break;
}
regs->d0 = -EINTR;
break;
case -ERESTARTSYS:
if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
regs->d0 = -EINTR;
break;
}
/* fallthrough */
case -ERESTARTNOINTR:
do_restart:
regs->d0 = regs->orig_d0;
regs->pc -= 2;
break;
}
}
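/*
* Note on the "regs->pc -= 2" above: the process entered the kernel
* through a two-byte "trap #0" instruction, so backing the saved PC up
* by two bytes makes it re-execute the trap, and hence reissue the
* system call, as soon as it returns to user space.
*/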
void ptrace_signal_deliver(struct pt_regs *regs, void *cookie)
{
if (regs->orig_d0 < 0)
return;
switch (regs->d0) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
regs->d0 = regs->orig_d0;
regs->orig_d0 = -1;
regs->pc -= 2;
break;
}
}
/*
* OK, we're invoking a handler
*/
static void
handle_signal(int sig, struct k_sigaction *ka, siginfo_t *info,
sigset_t *oldset, struct pt_regs *regs)
{
int err;
/* are we from a system call? */
if (regs->orig_d0 >= 0)
/* If so, check system call restarting.. */
handle_restart(regs, ka, 1);
/* set up the stack frame */
if (ka->sa.sa_flags & SA_SIGINFO)
err = setup_rt_frame(sig, ka, info, oldset, regs);
else
err = setup_frame(sig, ka, oldset, regs);
if (err)
return;
sigorsets(&current->blocked,&current->blocked,&ka->sa.sa_mask);
if (!(ka->sa.sa_flags & SA_NODEFER))
sigaddset(&current->blocked,sig);
recalc_sigpending();
if (test_thread_flag(TIF_DELAYED_TRACE)) {
regs->sr &= ~0x8000;
send_sig(SIGTRAP, current, 1);
}
clear_thread_flag(TIF_RESTORE_SIGMASK);
}
/*
* Note that 'init' is a special process: it doesn't get signals it doesn't
* want to handle. Thus you cannot kill init even with a SIGKILL even by
* mistake.
*/
asmlinkage void do_signal(struct pt_regs *regs)
{
siginfo_t info;
struct k_sigaction ka;
int signr;
sigset_t *oldset;
current->thread.esp0 = (unsigned long) regs;
if (test_thread_flag(TIF_RESTORE_SIGMASK))
oldset = &current->saved_sigmask;
else
oldset = &current->blocked;
signr = get_signal_to_deliver(&info, &ka, regs, NULL);
if (signr > 0) {
/* Whee! Actually deliver the signal. */
handle_signal(signr, &ka, &info, oldset, regs);
return;
}
/* Did we come from a system call? */
if (regs->orig_d0 >= 0)
/* Restart the system call - no handlers present */
handle_restart(regs, NULL, 0);
/* If there's no signal to deliver, we just restore the saved mask. */
if (test_thread_flag(TIF_RESTORE_SIGMASK)) {
clear_thread_flag(TIF_RESTORE_SIGMASK);
sigprocmask(SIG_SETMASK, &current->saved_sigmask, NULL);
}
}
/*
* linux/arch/m68k/kernel/signal.c
*
* Copyright (C) 1991, 1992 Linus Torvalds
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Linux/m68k support by Hamish Macdonald
*
* 68060 fixes by Jesper Skov
*
* 1997-12-01 Modified for POSIX.1b signals by Andreas Schwab
*
* mathemu support by Roman Zippel
* (Note: fpstate in the signal context is completely ignored for the emulator
* and the internal floating point format is put on stack)
*/
/*
* ++roman (07/09/96): implemented signal stacks (specially for tosemu on
* Atari :-) Current limitation: Only one sigstack can be active at one time.
* If a second signal with SA_ONSTACK set arrives while working on a sigstack,
* SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
* signal handlers!
*/
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/module.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
static const int frame_extra_sizes[16] = {
[1] = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
[2] = sizeof(((struct frame *)0)->un.fmt2),
[3] = sizeof(((struct frame *)0)->un.fmt3),
[4] = sizeof(((struct frame *)0)->un.fmt4),
[5] = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
[6] = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
[7] = sizeof(((struct frame *)0)->un.fmt7),
[8] = -1, /* sizeof(((struct frame *)0)->un.fmt8), */
[9] = sizeof(((struct frame *)0)->un.fmt9),
[10] = sizeof(((struct frame *)0)->un.fmta),
[11] = sizeof(((struct frame *)0)->un.fmtb),
[12] = -1, /* sizeof(((struct frame *)0)->un.fmtc), */
[13] = -1, /* sizeof(((struct frame *)0)->un.fmtd), */
[14] = -1, /* sizeof(((struct frame *)0)->un.fmte), */
[15] = -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};
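/*
* The index is the 4-bit stack frame format code taken from the
* exception frame's format/vector word (regs->format); the value is the
* number of extra bytes the CPU pushed beyond the basic four-word
* frame.  Entries of -1 mark formats the signal code never expects to
* see and treats as an error.
*/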
int handle_kernel_fault(struct pt_regs *regs)
{
const struct exception_table_entry *fixup;
struct pt_regs *tregs;
/* Are we prepared to handle this kernel fault? */
fixup = search_exception_tables(regs->pc);
if (!fixup)
return 0;
/* Create a new four word stack frame, discarding the old one. */
regs->stkadj = frame_extra_sizes[regs->format];
tregs = (struct pt_regs *)((long)regs + regs->stkadj);
tregs->vector = regs->vector;
tregs->format = 0;
tregs->pc = fixup->fixup;
tregs->sr = regs->sr;
return 1;
}
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
asmlinkage int
sys_sigsuspend(int unused0, int unused1, old_sigset_t mask)
{
mask &= _BLOCKABLE;
spin_lock_irq(&current->sighand->siglock);
current->saved_sigmask = current->blocked;
siginitset(&current->blocked, mask);
recalc_sigpending();
spin_unlock_irq(&current->sighand->siglock);
current->state = TASK_INTERRUPTIBLE;
schedule();
set_restore_sigmask();
return -ERESTARTNOHAND;
}
asmlinkage int
sys_sigaction(int sig, const struct old_sigaction __user *act,
struct old_sigaction __user *oact)
{
struct k_sigaction new_ka, old_ka;
int ret;
if (act) {
old_sigset_t mask;
if (!access_ok(VERIFY_READ, act, sizeof(*act)) ||
__get_user(new_ka.sa.sa_handler, &act->sa_handler) ||
__get_user(new_ka.sa.sa_restorer, &act->sa_restorer) ||
__get_user(new_ka.sa.sa_flags, &act->sa_flags) ||
__get_user(mask, &act->sa_mask))
return -EFAULT;
siginitset(&new_ka.sa.sa_mask, mask);
}
ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
if (!ret && oact) {
if (!access_ok(VERIFY_WRITE, oact, sizeof(*oact)) ||
__put_user(old_ka.sa.sa_handler, &oact->sa_handler) ||
__put_user(old_ka.sa.sa_restorer, &oact->sa_restorer) ||
__put_user(old_ka.sa.sa_flags, &oact->sa_flags) ||
__put_user(old_ka.sa.sa_mask.sig[0], &oact->sa_mask))
return -EFAULT;
}
return ret;
}
asmlinkage int
sys_sigaltstack(const stack_t __user *uss, stack_t __user *uoss)
{
return do_sigaltstack(uss, uoss, rdusp());
}
/*
* Do a signal return; undo the signal stack.
*
* Keep the return code on the stack quadword aligned!
* That makes the cache flush below easier.
*/
struct sigframe
{
char __user *pretcode;
int sig;
int code;
struct sigcontext __user *psc;
char retcode[8];
unsigned long extramask[_NSIG_WORDS-1];
struct sigcontext sc;
};
struct rt_sigframe
{
char __user *pretcode;
int sig;
struct siginfo __user *pinfo;
void __user *puc;
char retcode[8];
struct siginfo info;
struct ucontext uc;
};
static unsigned char fpu_version; /* version number of fpu, set by setup_frame */
static inline int restore_fpu_state(struct sigcontext *sc)
{
int err = 1;
if (FPU_IS_EMU) {
/* restore registers */
memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
memcpy(current->thread.fp, sc->sc_fpregs, 24);
return 0;
}
if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
/* Verify the frame format. */
if (!CPU_IS_060 && (sc->sc_fpstate[0] != fpu_version))
goto out;
if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 &&
!(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
goto out;
if (m68k_fputype & FPU_68882 &&
!(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
goto out;
} else if (CPU_IS_040) {
if (!(sc->sc_fpstate[1] == 0x00 ||
sc->sc_fpstate[1] == 0x28 ||
sc->sc_fpstate[1] == 0x60))
goto out;
} else if (CPU_IS_060) {
if (!(sc->sc_fpstate[3] == 0x00 ||
sc->sc_fpstate[3] == 0x60 ||
sc->sc_fpstate[3] == 0xe0))
goto out;
} else
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp1\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*sc->sc_fpregs), "m" (*sc->sc_fpcntl));
}
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k" : : "m" (*sc->sc_fpstate));
err = 0;
out:
return err;
}
#define FPCONTEXT_SIZE 216
#define uc_fpstate uc_filler[0]
#define uc_formatvec uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra uc_filler[FPCONTEXT_SIZE/4+1]
static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
unsigned char fpstate[FPCONTEXT_SIZE];
int context_size = CPU_IS_060 ? 8 : 0;
fpregset_t fpregs;
int err = 1;
if (FPU_IS_EMU) {
/* restore fpu control register */
if (__copy_from_user(current->thread.fpcntl,
uc->uc_mcontext.fpregs.f_fpcntl, 12))
goto out;
/* restore all other fpu registers */
if (__copy_from_user(current->thread.fp,
uc->uc_mcontext.fpregs.f_fpregs, 96))
goto out;
return 0;
}
if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
goto out;
if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
if (!CPU_IS_060)
context_size = fpstate[1];
/* Verify the frame format. */
if (!CPU_IS_060 && (fpstate[0] != fpu_version))
goto out;
if (CPU_IS_020_OR_030) {
if (m68k_fputype & FPU_68881 &&
!(context_size == 0x18 || context_size == 0xb4))
goto out;
if (m68k_fputype & FPU_68882 &&
!(context_size == 0x38 || context_size == 0xd4))
goto out;
} else if (CPU_IS_040) {
if (!(context_size == 0x00 ||
context_size == 0x28 ||
context_size == 0x60))
goto out;
} else if (CPU_IS_060) {
if (!(fpstate[3] == 0x00 ||
fpstate[3] == 0x60 ||
fpstate[3] == 0xe0))
goto out;
} else
goto out;
if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
sizeof(fpregs)))
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"fmovemx %0,%%fp0-%%fp7\n\t"
"fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
".chip 68k"
: /* no outputs */
: "m" (*fpregs.f_fpregs),
"m" (*fpregs.f_fpcntl));
}
if (context_size &&
__copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
context_size))
goto out;
__asm__ volatile (".chip 68k/68881\n\t"
"frestore %0\n\t"
".chip 68k" : : "m" (*fpstate));
err = 0;
out:
return err;
}
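/*
* mangle_kernel_stack() rebuilds the exception frame that will be used
* on the way back to user space: for formats with extra words it pulls
* the saved extra data out of the user signal frame, shifts the kernel
* stack (switch_stack plus pt_regs) down to make room for it, and then
* jumps straight to ret_from_signal, since the stack frame it was
* called on no longer exists after the move.
*/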
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
void __user *fp)
{
int fsize = frame_extra_sizes[formatvec >> 12];
if (fsize < 0) {
/*
* user process trying to return with weird frame format
*/
#ifdef DEBUG
printk("user process returning with weird frame format\n");
#endif
return 1;
}
if (!fsize) {
regs->format = formatvec >> 12;
regs->vector = formatvec & 0xfff;
} else {
struct switch_stack *sw = (struct switch_stack *)regs - 1;
unsigned long buf[fsize / 2]; /* yes, twice as much */
/* that'll make sure that expansion won't crap over data */
if (copy_from_user(buf + fsize / 4, fp, fsize))
return 1;
/* point of no return */
regs->format = formatvec >> 12;
regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
__asm__ __volatile__
(" movel %0,%/a0\n\t"
" subl %1,%/a0\n\t" /* make room on stack */
" movel %/a0,%/sp\n\t" /* set stack pointer */
/* move switch_stack and pt_regs */
"1: movel %0@+,%/a0@+\n\t"
" dbra %2,1b\n\t"
" lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
" lsrl #2,%1\n\t"
" subql #1,%1\n\t"
/* copy to the gap we'd made */
"2: movel %4@+,%/a0@+\n\t"
" dbra %1,2b\n\t"
" bral ret_from_signal\n"
: /* no outputs, it doesn't ever return */
: "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
"n" (frame_offset), "a" (buf + fsize/4)
: "a0");
#undef frame_offset
}
return 0;
}
static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
int formatvec;
struct sigcontext context;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
/* get previous context */
if (copy_from_user(&context, usc, sizeof(context)))
goto badframe;
/* restore passed registers */
regs->d0 = context.sc_d0;
regs->d1 = context.sc_d1;
regs->a0 = context.sc_a0;
regs->a1 = context.sc_a1;
regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
regs->pc = context.sc_pc;
regs->orig_d0 = -1; /* disable syscall checks */
wrusp(context.sc_usp);
formatvec = context.sc_formatvec;
err = restore_fpu_state(&context);
if (err || mangle_kernel_stack(regs, formatvec, fp))
goto badframe;
return 0;
badframe:
return 1;
}
static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
struct ucontext __user *uc)
{
int temp;
greg_t __user *gregs = uc->uc_mcontext.gregs;
unsigned long usp;
int err;
/* Always make any pending restarted system calls return -EINTR */
current_thread_info()->restart_block.fn = do_no_restart_syscall;
err = __get_user(temp, &uc->uc_mcontext.version);
if (temp != MCONTEXT_VERSION)
goto badframe;
/* restore passed registers */
err |= __get_user(regs->d0, &gregs[0]);
err |= __get_user(regs->d1, &gregs[1]);
err |= __get_user(regs->d2, &gregs[2]);
err |= __get_user(regs->d3, &gregs[3]);
err |= __get_user(regs->d4, &gregs[4]);
err |= __get_user(regs->d5, &gregs[5]);
err |= __get_user(sw->d6, &gregs[6]);
err |= __get_user(sw->d7, &gregs[7]);
err |= __get_user(regs->a0, &gregs[8]);
err |= __get_user(regs->a1, &gregs[9]);
err |= __get_user(regs->a2, &gregs[10]);
err |= __get_user(sw->a3, &gregs[11]);
err |= __get_user(sw->a4, &gregs[12]);
err |= __get_user(sw->a5, &gregs[13]);
err |= __get_user(sw->a6, &gregs[14]);
err |= __get_user(usp, &gregs[15]);
wrusp(usp);
err |= __get_user(regs->pc, &gregs[16]);
err |= __get_user(temp, &gregs[17]);
regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
regs->orig_d0 = -1; /* disable syscall checks */
err |= __get_user(temp, &uc->uc_formatvec);
err |= rt_restore_fpu_state(uc);
if (err || do_sigaltstack(&uc->uc_stack, NULL, usp) == -EFAULT)
goto badframe;
if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
goto badframe;
return 0;
badframe:
return 1;
}
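/*
* In both sigreturn paths below the frame is located at usp - 4: the
* handler was entered with the user stack pointer at the start of the
* signal frame, and returning through pretcode popped that first long
* word, so by the time the trampoline traps back into the kernel the
* stack pointer points at the frame's second field.
*/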
asmlinkage int do_sigreturn(unsigned long __unused)
{
struct switch_stack *sw = (struct switch_stack *) &__unused;
struct pt_regs *regs = (struct pt_regs *) (sw + 1);
unsigned long usp = rdusp();
struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
sigset_t set;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
(_NSIG_WORDS > 1 &&
__copy_from_user(&set.sig[1], &frame->extramask,
sizeof(frame->extramask))))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
current->blocked = set;
recalc_sigpending();
if (restore_sigcontext(regs, &frame->sc, frame + 1))
goto badframe;
return regs->d0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
asmlinkage int do_rt_sigreturn(unsigned long __unused)
{
struct switch_stack *sw = (struct switch_stack *) &__unused;
struct pt_regs *regs = (struct pt_regs *) (sw + 1);
unsigned long usp = rdusp();
struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
sigset_t set;
if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
goto badframe;
if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
goto badframe;
sigdelsetmask(&set, ~_BLOCKABLE);
current->blocked = set;
recalc_sigpending();
if (rt_restore_ucontext(regs, sw, &frame->uc))
goto badframe;
return regs->d0;
badframe:
force_sig(SIGSEGV, current);
return 0;
}
#ifdef CONFIG_MMU
#include "sys_m68k_mm.c"
#else
#include "sys_m68k_no.c"
#endif
/*
* linux/arch/m68k/kernel/sys_m68k.c
*
* This file contains various random system calls that
* have a non-standard calling sequence on the Linux/m68k
* platform.
*/
#include <linux/capability.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/syscalls.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/ipc.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/cachectl.h>
#include <asm/traps.h>
#include <asm/page.h>
#include <asm/unistd.h>
#include <linux/elf.h>
#include <asm/tlb.h>
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
asmlinkage long sys_mmap2(unsigned long addr, unsigned long len,
unsigned long prot, unsigned long flags,
unsigned long fd, unsigned long pgoff)
{
/*
* This is wrong for sun3 - there PAGE_SIZE is 8Kb,
* so we need to shift the argument down by 1; m68k mmap64(3)
* (in libc) expects the last argument of mmap2 in 4Kb units.
*/
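	/*
	 * A minimal sketch of the missing fix-up described above (left
	 * disabled, illustration only): on a sun3 kernel, where pages
	 * are 8K, the 4K-unit offset would have to be halved and odd
	 * offsets rejected before handing it to sys_mmap_pgoff().
	 */
#if 0
	if (pgoff & 1)
		return -EINVAL;
	pgoff >>= 1;
#endif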
return sys_mmap_pgoff(addr, len, prot, flags, fd, pgoff);
}
/* Convert virtual (user) address VADDR to physical address PADDR */
#define virt_to_phys_040(vaddr) \
({ \
unsigned long _mmusr, _paddr; \
\
__asm__ __volatile__ (".chip 68040\n\t" \
"ptestr (%1)\n\t" \
"movec %%mmusr,%0\n\t" \
".chip 68k" \
: "=r" (_mmusr) \
: "a" (vaddr)); \
_paddr = (_mmusr & MMU_R_040) ? (_mmusr & PAGE_MASK) : 0; \
_paddr; \
})
static inline int
cache_flush_040 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
/* This nop is needed for some broken versions of the 68040. */
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
if ((paddr = virt_to_phys_040(addr))) {
paddr += addr & ~(PAGE_MASK | 15);
len = (len + (addr & 15) + 15) >> 4;
} else {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
len = (len + 15) >> 4;
}
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* No need to page align here since it is done by
* virt_to_phys_040().
*/
addr += PAGE_SIZE;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_040(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_040(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ ("nop\n\t"
".chip 68040\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
#define virt_to_phys_060(vaddr) \
({ \
unsigned long paddr; \
__asm__ __volatile__ (".chip 68060\n\t" \
"plpar (%0)\n\t" \
".chip 68k" \
: "=a" (paddr) \
: "0" (vaddr)); \
(paddr); /* XXX */ \
})
static inline int
cache_flush_060 (unsigned long addr, int scope, int cache, unsigned long len)
{
unsigned long paddr, i;
/*
* 68060 manual says:
* cpush %dc : flush DC, remains valid (with our %cacr setup)
* cpush %ic : invalidate IC
* cpush %bc : flush DC + invalidate IC
*/
switch (scope)
{
case FLUSH_SCOPE_ALL:
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %dc\n\t"
".chip 68k");
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %ic\n\t"
".chip 68k");
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpusha %bc\n\t"
".chip 68k");
break;
}
break;
case FLUSH_SCOPE_LINE:
/* Find the physical address of the first mapped page in the
address range. */
len += addr & 15;
addr &= -16;
if (!(paddr = virt_to_phys_060(addr))) {
unsigned long tmp = PAGE_SIZE - (addr & ~PAGE_MASK);
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
tmp = PAGE_SIZE;
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= tmp)
return 0;
addr += tmp;
len -= tmp;
}
}
len = (len + 15) >> 4;
i = (PAGE_SIZE - (paddr & ~PAGE_MASK)) >> 4;
while (len--)
{
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushl %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
if (!--i && len)
{
/*
* We just want to jump to the first cache line
* in the next page.
*/
addr += PAGE_SIZE;
addr &= PAGE_MASK;
i = PAGE_SIZE / 16;
/* Recompute physical address when crossing a page
boundary. */
for (;;)
{
if ((paddr = virt_to_phys_060(addr)))
break;
if (len <= i)
return 0;
len -= i;
addr += PAGE_SIZE;
}
}
else
paddr += 16;
}
break;
default:
case FLUSH_SCOPE_PAGE:
len += (addr & ~PAGE_MASK) + (PAGE_SIZE - 1);
addr &= PAGE_MASK; /* Workaround for bug in some
revisions of the 68060 */
for (len >>= PAGE_SHIFT; len--; addr += PAGE_SIZE)
{
if (!(paddr = virt_to_phys_060(addr)))
continue;
switch (cache)
{
case FLUSH_CACHE_DATA:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%dc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
case FLUSH_CACHE_INSN:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%ic,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
default:
case FLUSH_CACHE_BOTH:
__asm__ __volatile__ (".chip 68060\n\t"
"cpushp %%bc,(%0)\n\t"
".chip 68k"
: : "a" (paddr));
break;
}
}
break;
}
return 0;
}
/* sys_cacheflush -- flush (part of) the processor cache. */
asmlinkage int
sys_cacheflush (unsigned long addr, int scope, int cache, unsigned long len)
{
struct vm_area_struct *vma;
int ret = -EINVAL;
if (scope < FLUSH_SCOPE_LINE || scope > FLUSH_SCOPE_ALL ||
cache & ~FLUSH_CACHE_BOTH)
goto out;
if (scope == FLUSH_SCOPE_ALL) {
/* Only the superuser may explicitly flush the whole cache. */
ret = -EPERM;
if (!capable(CAP_SYS_ADMIN))
goto out;
} else {
/*
* Verify that the specified address region actually belongs
* to this process.
*/
vma = find_vma (current->mm, addr);
ret = -EINVAL;
/* Check for overflow. */
if (addr + len < addr)
goto out;
if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end)
goto out;
}
if (CPU_IS_020_OR_030) {
if (scope == FLUSH_SCOPE_LINE && len < 256) {
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 4;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x400;
len >>= 2;
while (len--) {
__asm__ __volatile__ ("movec %1, %%caar\n\t"
"movec %0, %%cacr"
: /* no outputs */
: "r" (cacr), "r" (addr));
addr += 4;
}
} else {
/* Flush the whole cache, even if page granularity requested. */
unsigned long cacr;
__asm__ ("movec %%cacr, %0" : "=r" (cacr));
if (cache & FLUSH_CACHE_INSN)
cacr |= 8;
if (cache & FLUSH_CACHE_DATA)
cacr |= 0x800;
__asm__ __volatile__ ("movec %0, %%cacr" : : "r" (cacr));
}
ret = 0;
goto out;
} else {
/*
* 040 or 060: don't blindly trust 'scope', someone could
* try to flush a few megs of memory.
*/
if (len>=3*PAGE_SIZE && scope<FLUSH_SCOPE_PAGE)
scope=FLUSH_SCOPE_PAGE;
if (len>=10*PAGE_SIZE && scope<FLUSH_SCOPE_ALL)
scope=FLUSH_SCOPE_ALL;
if (CPU_IS_040) {
ret = cache_flush_040 (addr, scope, cache, len);
} else if (CPU_IS_060) {
ret = cache_flush_060 (addr, scope, cache, len);
}
}
out:
return ret;
}
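/*
* Minimal userspace usage sketch for the syscall above (an assumption
* for illustration, not part of this file): flushing both caches over a
* buffer that was just filled with generated code.  FLUSH_SCOPE_LINE
* and FLUSH_CACHE_BOTH come from <asm/cachectl.h>; the raw syscall(2)
* form is used because libc wrappers for cacheflush vary.
*/
#if 0
#include <unistd.h>
#include <sys/syscall.h>
#include <asm/cachectl.h>
#include <asm/unistd.h>

static int flush_generated_code(void *buf, unsigned long len)
{
	return syscall(__NR_cacheflush, (unsigned long)buf,
		       FLUSH_SCOPE_LINE, FLUSH_CACHE_BOTH, len);
}
#endif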
asmlinkage int sys_getpagesize(void)
{
return PAGE_SIZE;
}
/*
* Do a system call from kernel instead of calling sys_execve so we
* end up with proper pt_regs.
*/
int kernel_execve(const char *filename,
const char *const argv[],
const char *const envp[])
{
register long __res asm ("%d0") = __NR_execve;
register long __a asm ("%d1") = (long)(filename);
register long __b asm ("%d2") = (long)(argv);
register long __c asm ("%d3") = (long)(envp);
asm volatile ("trap #0" : "+d" (__res)
: "d" (__a), "d" (__b), "d" (__c));
return __res;
}
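/*
* Illustrative call site (a sketch, not from this file): generic kernel
* code, for example the boot-time init launcher, calls this helper with
* a path and NULL-terminated argument and environment vectors.
*/
#if 0
	static const char *const argv[] = { "/sbin/init", NULL };
	static const char *const envp[] = { "HOME=/", "TERM=linux", NULL };

	kernel_execve(argv[0], argv, envp);
#endif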
asmlinkage unsigned long sys_get_thread_area(void)
{
return current_thread_info()->tp_value;
}
asmlinkage int sys_set_thread_area(unsigned long tp)
{
current_thread_info()->tp_value = tp;
return 0;
}
/* This syscall gets its arguments in A0 (mem), D2 (oldval) and
D1 (newval). */
asmlinkage int
sys_atomic_cmpxchg_32(unsigned long newval, int oldval, int d3, int d4, int d5,
unsigned long __user * mem)
{
/* This was borrowed from ARM's implementation. */
for (;;) {
struct mm_struct *mm = current->mm;
pgd_t *pgd;
pmd_t *pmd;
pte_t *pte;
spinlock_t *ptl;
unsigned long mem_value;
down_read(&mm->mmap_sem);
pgd = pgd_offset(mm, (unsigned long)mem);
if (!pgd_present(*pgd))
goto bad_access;
pmd = pmd_offset(pgd, (unsigned long)mem);
if (!pmd_present(*pmd))
goto bad_access;
pte = pte_offset_map_lock(mm, pmd, (unsigned long)mem, &ptl);
if (!pte_present(*pte) || !pte_dirty(*pte)
|| !pte_write(*pte)) {
pte_unmap_unlock(pte, ptl);
goto bad_access;
}
mem_value = *mem;
if (mem_value == oldval)
*mem = newval;
pte_unmap_unlock(pte, ptl);
up_read(&mm->mmap_sem);
return mem_value;
bad_access:
up_read(&mm->mmap_sem);
/* This is not necessarily a bad access: we can get here if
the memory we're trying to write to should be copied-on-write.
Make the kernel do the necessary page handling, then retry.
Simulate a write access fault to do that. */
{
/* The first argument of the function corresponds to
D1, which is the first field of struct pt_regs. */
struct pt_regs *fp = (struct pt_regs *)&newval;
/* '3' is an RMW flag. */
if (do_page_fault(fp, (unsigned long)mem, 3))
/* If the do_page_fault() failed, we don't
have anything meaningful to return.
There should be a SIGSEGV pending for
the process. */
return 0xdeadbeef;
}
}
}
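/*
* Hypothetical userspace use of the syscall above (a sketch: the
* m68k_cmpxchg_32() wrapper name is invented here and would have to be
* provided by the C library using the register convention documented
* above): a trivial lock for CPUs without a usable CAS instruction.
*/
#if 0
/* returns the previous value of *mem, like the kernel routine above */
extern unsigned long m68k_cmpxchg_32(unsigned long newval,
				     unsigned long oldval,
				     unsigned long *mem);

static void emulated_lock(unsigned long *lock)
{
	while (m68k_cmpxchg_32(1, 0, lock) != 0)
		;	/* spin until we swap 0 -> 1 */
}

static void emulated_unlock(unsigned long *lock)
{
	*lock = 0;
}
#endif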
asmlinkage int sys_atomic_barrier(void)
{
/* no code needed for uniprocs */
return 0;
}
#ifdef CONFIG_MMU
#include "time_mm.c"
#else
#include "time_no.c"
#endif
/*
* linux/arch/m68k/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*
* This file contains the m68k-specific time handling details.
* Most of the stuff is located in the machine specific files.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/profile.h>
static inline int set_rtc_mmss(unsigned long nowtime)
{
if (mach_set_clock_mmss)
return mach_set_clock_mmss (nowtime);
return -1;
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "xtime_update()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
xtime_update(1);
update_process_times(user_mode(get_irq_regs()));
profile_tick(CPU_PROFILING);
#ifdef CONFIG_HEARTBEAT
/* use power LED as a heartbeat instead -- much more useful
for debugging -- based on the version for PReP by Cort */
/* acts like an actual heart beat -- ie thump-thump-pause... */
if (mach_heartbeat) {
static unsigned cnt = 0, period = 0, dist = 0;
if (cnt == 0 || cnt == dist)
mach_heartbeat( 1 );
else if (cnt == 7 || cnt == dist+7)
mach_heartbeat( 0 );
if (++cnt > period) {
cnt = 0;
/* The hyperbolic function below modifies the heartbeat period
* length depending on the current (5 min) load average. It goes
* through the points f(0)=126, f(1)=86, f(5)=51,
* f(inf)->30. */
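/* avenrun[] is fixed point with FSHIFT fraction bits, so the formula
* below is 672/(5*load+7) + 30: load 0 -> 126, load 1 -> 86,
* load 5 -> 51, approaching 30 for very high load. */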
period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
dist = period / 4;
}
}
#endif /* CONFIG_HEARTBEAT */
return IRQ_HANDLED;
}
void read_persistent_clock(struct timespec *ts)
{
struct rtc_time time;
ts->tv_sec = 0;
ts->tv_nsec = 0;
if (mach_hwclk) {
mach_hwclk(0, &time);
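/* tm_year counts years since 1900; a result before 1970 presumably
* came from an RTC that only stores two digits, so treat it as 20xx. */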
if ((time.tm_year += 1900) < 1970)
time.tm_year += 100;
ts->tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
time.tm_hour, time.tm_min, time.tm_sec);
}
}
void __init time_init(void)
{
mach_sched_init(timer_interrupt);
}
u32 arch_gettimeoffset(void)
{
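/* mach_gettimeoffset() reports microseconds since the last tick;
* arch_gettimeoffset() must return nanoseconds, hence the * 1000. */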
return mach_gettimeoffset() * 1000;
}
static int __init rtc_init(void)
{
struct platform_device *pdev;
if (!mach_hwclk)
return -ENODEV;
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
return 0;
}
module_init(rtc_init);
/*
* linux/arch/m68k/kernel/time.c
*
* Copyright (C) 1991, 1992, 1995 Linus Torvalds
*
* This file contains the m68k-specific time handling details.
* Most of the stuff is located in the machine specific files.
*
* 1997-09-10 Updated NTP code according to technical memorandum Jan '96
* "A Kernel Model for Precision Timekeeping" by Dave Mills
*/
#include <linux/errno.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/param.h>
#include <linux/string.h>
#include <linux/mm.h>
#include <linux/rtc.h>
#include <linux/platform_device.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/irq_regs.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/profile.h>
static inline int set_rtc_mmss(unsigned long nowtime)
{
if (mach_set_clock_mmss)
return mach_set_clock_mmss (nowtime);
return -1;
}
/*
* timer_interrupt() needs to keep up the real-time clock,
* as well as call the "xtime_update()" routine every clocktick
*/
static irqreturn_t timer_interrupt(int irq, void *dummy)
{
xtime_update(1);
update_process_times(user_mode(get_irq_regs()));
profile_tick(CPU_PROFILING);
#ifdef CONFIG_HEARTBEAT
/* use power LED as a heartbeat instead -- much more useful
for debugging -- based on the version for PReP by Cort */
/* acts like an actual heart beat -- ie thump-thump-pause... */
if (mach_heartbeat) {
static unsigned cnt = 0, period = 0, dist = 0;
if (cnt == 0 || cnt == dist)
mach_heartbeat( 1 );
else if (cnt == 7 || cnt == dist+7)
mach_heartbeat( 0 );
if (++cnt > period) {
cnt = 0;
/* The hyperbolic function below modifies the heartbeat period
* length depending on the current (5 min) load average. It goes
* through the points f(0)=126, f(1)=86, f(5)=51,
* f(inf)->30. */
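/* avenrun[] is fixed point with FSHIFT fraction bits, so the formula
* below is 672/(5*load+7) + 30: load 0 -> 126, load 1 -> 86,
* load 5 -> 51, approaching 30 for very high load. */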
period = ((672<<FSHIFT)/(5*avenrun[0]+(7<<FSHIFT))) + 30;
dist = period / 4;
}
}
#endif /* CONFIG_HEARTBEAT */
return IRQ_HANDLED;
}
void read_persistent_clock(struct timespec *ts)
{
struct rtc_time time;
ts->tv_sec = 0;
ts->tv_nsec = 0;
if (mach_hwclk) {
mach_hwclk(0, &time);
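/* tm_year counts years since 1900; a result before 1970 presumably
* came from an RTC that only stores two digits, so treat it as 20xx. */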
if ((time.tm_year += 1900) < 1970)
time.tm_year += 100;
ts->tv_sec = mktime(time.tm_year, time.tm_mon, time.tm_mday,
time.tm_hour, time.tm_min, time.tm_sec);
}
}
void __init time_init(void)
{
mach_sched_init(timer_interrupt);
}
u32 arch_gettimeoffset(void)
{
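/* mach_gettimeoffset() reports microseconds since the last tick;
* arch_gettimeoffset() must return nanoseconds, hence the * 1000. */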
return mach_gettimeoffset() * 1000;
}
static int __init rtc_init(void)
{
struct platform_device *pdev;
if (!mach_hwclk)
return -ENODEV;
pdev = platform_device_register_simple("rtc-generic", -1, NULL, 0);
if (IS_ERR(pdev))
return PTR_ERR(pdev);
return 0;
}
module_init(rtc_init);
#ifdef CONFIG_MMU
#include "traps_mm.c"
#else
#include "traps_no.c"
#endif
/*
* linux/arch/m68k/kernel/traps.c
*
* Copyright (C) 1993, 1994 by Hamish Macdonald
*
* 68040 fixes by Michael Rausch
* 68040 fixes by Martin Apel
* 68040 fixes and writeback by Richard Zidlicky
* 68060 fixes by Roman Hodek
* 68060 fixes by Jesper Skov
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Sets up all exception vectors
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/siginfo.h>
/* assembler routines */
asmlinkage void system_call(void);
asmlinkage void buserr(void);
asmlinkage void trap(void);
asmlinkage void nmihandler(void);
#ifdef CONFIG_M68KFPU_EMU
asmlinkage void fpu_emu(void);
#endif
e_vector vectors[256];
/* nmi handler for the Amiga */
asm(".text\n"
__ALIGN_STR "\n"
"nmihandler: rte");
/*
* this must be called very early as the kernel might
* use some instructions that are emulated on the 060,
* so we must be prepared for early probe attempts (e.g. nf_init).
*/
void __init base_trap_init(void)
{
if (MACH_IS_SUN3X) {
extern e_vector *sun3x_prom_vbr;
__asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
}
/* setup the exception vector table */
__asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
if (CPU_IS_060) {
/* set up ISP entry points */
asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
vectors[VEC_UNIMPII] = unimp_vec;
}
vectors[VEC_BUSERR] = buserr;
vectors[VEC_ILLEGAL] = trap;
vectors[VEC_SYS] = system_call;
}
void __init trap_init (void)
{
int i;
for (i = VEC_SPUR; i <= VEC_INT7; i++)
vectors[i] = bad_inthandler;
for (i = 0; i < VEC_USER; i++)
if (!vectors[i])
vectors[i] = trap;
for (i = VEC_USER; i < 256; i++)
vectors[i] = bad_inthandler;
#ifdef CONFIG_M68KFPU_EMU
if (FPU_IS_EMU)
vectors[VEC_LINE11] = fpu_emu;
#endif
if (CPU_IS_040 && !FPU_IS_EMU) {
/* set up FPSP entry points */
asmlinkage void dz_vec(void) asm ("dz");
asmlinkage void inex_vec(void) asm ("inex");
asmlinkage void ovfl_vec(void) asm ("ovfl");
asmlinkage void unfl_vec(void) asm ("unfl");
asmlinkage void snan_vec(void) asm ("snan");
asmlinkage void operr_vec(void) asm ("operr");
asmlinkage void bsun_vec(void) asm ("bsun");
asmlinkage void fline_vec(void) asm ("fline");
asmlinkage void unsupp_vec(void) asm ("unsupp");
vectors[VEC_FPDIVZ] = dz_vec;
vectors[VEC_FPIR] = inex_vec;
vectors[VEC_FPOVER] = ovfl_vec;
vectors[VEC_FPUNDER] = unfl_vec;
vectors[VEC_FPNAN] = snan_vec;
vectors[VEC_FPOE] = operr_vec;
vectors[VEC_FPBRUC] = bsun_vec;
vectors[VEC_LINE11] = fline_vec;
vectors[VEC_FPUNSUP] = unsupp_vec;
}
if (CPU_IS_060 && !FPU_IS_EMU) {
/* set up IFPSP entry points */
asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
vectors[VEC_FPNAN] = snan_vec6;
vectors[VEC_FPOE] = operr_vec6;
vectors[VEC_FPOVER] = ovfl_vec6;
vectors[VEC_FPUNDER] = unfl_vec6;
vectors[VEC_FPDIVZ] = dz_vec6;
vectors[VEC_FPIR] = inex_vec6;
vectors[VEC_LINE11] = fline_vec6;
vectors[VEC_FPUNSUP] = unsupp_vec6;
vectors[VEC_UNIMPEA] = effadd_vec6;
}
/* if running on an amiga, make the NMI interrupt do nothing */
if (MACH_IS_AMIGA) {
vectors[VEC_INT7] = nmihandler;
}
}
static const char *vec_names[] = {
[VEC_RESETSP] = "RESET SP",
[VEC_RESETPC] = "RESET PC",
[VEC_BUSERR] = "BUS ERROR",
[VEC_ADDRERR] = "ADDRESS ERROR",
[VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
[VEC_ZERODIV] = "ZERO DIVIDE",
[VEC_CHK] = "CHK",
[VEC_TRAP] = "TRAPcc",
[VEC_PRIV] = "PRIVILEGE VIOLATION",
[VEC_TRACE] = "TRACE",
[VEC_LINE10] = "LINE 1010",
[VEC_LINE11] = "LINE 1111",
[VEC_RESV12] = "UNASSIGNED RESERVED 12",
[VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
[VEC_FORMAT] = "FORMAT ERROR",
[VEC_UNINT] = "UNINITIALIZED INTERRUPT",
[VEC_RESV16] = "UNASSIGNED RESERVED 16",
[VEC_RESV17] = "UNASSIGNED RESERVED 17",
[VEC_RESV18] = "UNASSIGNED RESERVED 18",
[VEC_RESV19] = "UNASSIGNED RESERVED 19",
[VEC_RESV20] = "UNASSIGNED RESERVED 20",
[VEC_RESV21] = "UNASSIGNED RESERVED 21",
[VEC_RESV22] = "UNASSIGNED RESERVED 22",
[VEC_RESV23] = "UNASSIGNED RESERVED 23",
[VEC_SPUR] = "SPURIOUS INTERRUPT",
[VEC_INT1] = "LEVEL 1 INT",
[VEC_INT2] = "LEVEL 2 INT",
[VEC_INT3] = "LEVEL 3 INT",
[VEC_INT4] = "LEVEL 4 INT",
[VEC_INT5] = "LEVEL 5 INT",
[VEC_INT6] = "LEVEL 6 INT",
[VEC_INT7] = "LEVEL 7 INT",
[VEC_SYS] = "SYSCALL",
[VEC_TRAP1] = "TRAP #1",
[VEC_TRAP2] = "TRAP #2",
[VEC_TRAP3] = "TRAP #3",
[VEC_TRAP4] = "TRAP #4",
[VEC_TRAP5] = "TRAP #5",
[VEC_TRAP6] = "TRAP #6",
[VEC_TRAP7] = "TRAP #7",
[VEC_TRAP8] = "TRAP #8",
[VEC_TRAP9] = "TRAP #9",
[VEC_TRAP10] = "TRAP #10",
[VEC_TRAP11] = "TRAP #11",
[VEC_TRAP12] = "TRAP #12",
[VEC_TRAP13] = "TRAP #13",
[VEC_TRAP14] = "TRAP #14",
[VEC_TRAP15] = "TRAP #15",
[VEC_FPBRUC] = "FPCP BSUN",
[VEC_FPIR] = "FPCP INEXACT",
[VEC_FPDIVZ] = "FPCP DIV BY 0",
[VEC_FPUNDER] = "FPCP UNDERFLOW",
[VEC_FPOE] = "FPCP OPERAND ERROR",
[VEC_FPOVER] = "FPCP OVERFLOW",
[VEC_FPNAN] = "FPCP SNAN",
[VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
[VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
[VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
[VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
[VEC_RESV59] = "UNASSIGNED RESERVED 59",
[VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
[VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
[VEC_RESV62] = "UNASSIGNED RESERVED 62",
[VEC_RESV63] = "UNASSIGNED RESERVED 63",
};
static const char *space_names[] = {
[0] = "Space 0",
[USER_DATA] = "User Data",
[USER_PROGRAM] = "User Program",
#ifndef CONFIG_SUN3
[3] = "Space 3",
#else
[FC_CONTROL] = "Control",
#endif
[4] = "Space 4",
[SUPER_DATA] = "Super Data",
[SUPER_PROGRAM] = "Super Program",
[CPU_SPACE] = "CPU"
};
void die_if_kernel(char *,struct pt_regs *,int);
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
int send_fault_sig(struct pt_regs *regs);
asmlinkage void trap_c(struct frame *fp);
#if defined (CONFIG_M68060)
static inline void access_error060 (struct frame *fp)
{
unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
#ifdef DEBUG
printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
#endif
if (fslw & MMU060_BPE) {
/* branch prediction error -> clear branch cache */
__asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
"orl #0x00400000,%/d0\n\t"
"movec %/d0,%/cacr"
: : : "d0" );
/* return if there's no other error */
if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
return;
}
if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
unsigned long errorcode;
unsigned long addr = fp->un.fmt4.effaddr;
if (fslw & MMU060_MA)
addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
errorcode = 1;
if (fslw & MMU060_DESC_ERR) {
__flush_tlb040_one(addr);
errorcode = 0;
}
if (fslw & MMU060_W)
errorcode |= 2;
#ifdef DEBUG
printk("errorcode = %d\n", errorcode );
#endif
do_page_fault(&fp->ptregs, addr, errorcode);
} else if (fslw & (MMU060_SEE)){
/* Software Emulation Error.
* fault during mem_read/mem_write in ifpsp060/os.S
*/
send_fault_sig(&fp->ptregs);
} else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
send_fault_sig(&fp->ptregs) > 0) {
printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
printk( "68060 access error, fslw=%lx\n", fslw );
trap_c( fp );
}
}
#endif /* CONFIG_M68060 */
#if defined (CONFIG_M68040)
static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
{
unsigned long mmusr;
mm_segment_t old_fs = get_fs();
set_fs(MAKE_MM_SEG(wbs));
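/* set_fs() loads SFC/DFC on m68k, so MAKE_MM_SEG(wbs) makes the ptest
* below use the same function code (address space) as the faulting
* access described by the writeback status word. */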
if (iswrite)
asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
else
asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
set_fs(old_fs);
return mmusr;
}
static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
unsigned long wbd)
{
int res = 0;
mm_segment_t old_fs = get_fs();
/* set_fs can not be moved, otherwise put_user() may oops */
set_fs(MAKE_MM_SEG(wbs));
switch (wbs & WBSIZ_040) {
case BA_SIZE_BYTE:
res = put_user(wbd & 0xff, (char __user *)wba);
break;
case BA_SIZE_WORD:
res = put_user(wbd & 0xffff, (short __user *)wba);
break;
case BA_SIZE_LONG:
res = put_user(wbd, (int __user *)wba);
break;
}
/* set_fs can not be moved, otherwise put_user() may oops */
set_fs(old_fs);
#ifdef DEBUG
printk("do_040writeback1, res=%d\n",res);
#endif
return res;
}
/* after an exception in a writeback the stack frame corresponding
* to that exception is discarded, set a few bits in the old frame
* to simulate what it should look like
*/
static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
{
fp->un.fmt7.faddr = wba;
fp->un.fmt7.ssw = wbs & 0xff;
if (wba != current->thread.faddr)
fp->un.fmt7.ssw |= MA_040;
}
static inline void do_040writebacks(struct frame *fp)
{
int res = 0;
#if 0
if (fp->un.fmt7.wb1s & WBV_040)
printk("access_error040: cannot handle 1st writeback. oops.\n");
#endif
if ((fp->un.fmt7.wb2s & WBV_040) &&
!(fp->un.fmt7.wb2s & WBTT_040)) {
res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
fp->un.fmt7.wb2d);
if (res)
fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
else
fp->un.fmt7.wb2s = 0;
}
/* do the 2nd wb only if the first one was successful (except for a kernel wb) */
if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
fp->un.fmt7.wb3d);
if (res)
{
fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
fp->un.fmt7.wb3s &= (~WBV_040);
fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
}
else
fp->un.fmt7.wb3s = 0;
}
if (res)
send_fault_sig(&fp->ptregs);
}
/*
* Called from sigreturn(); we must make sure user-space code didn't
* manipulate the exception frame to circumvent protection, then
* complete the pending writebacks.
* We just clear TM2 to turn them into user-space accesses.
*/
asmlinkage void berr_040cleanup(struct frame *fp)
{
fp->un.fmt7.wb2s &= ~4;
fp->un.fmt7.wb3s &= ~4;
do_040writebacks(fp);
}
static inline void access_error040(struct frame *fp)
{
unsigned short ssw = fp->un.fmt7.ssw;
unsigned long mmusr;
#ifdef DEBUG
printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
#endif
if (ssw & ATC_040) {
unsigned long addr = fp->un.fmt7.faddr;
unsigned long errorcode;
/*
* The MMU status has to be determined AFTER the address
* has been corrected if there was a misaligned access (MA).
*/
if (ssw & MA_040)
addr = (addr + 7) & -8;
/* MMU error, get the MMUSR info for this access */
mmusr = probe040(!(ssw & RW_040), addr, ssw);
#ifdef DEBUG
printk("mmusr = %lx\n", mmusr);
#endif
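/* Build the do_page_fault() error code: bit 0 set means the page is
* present but protected, clear means not present; bit 1 set means a
* write access (same encoding as in the sun3 bus_error030 below). */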
errorcode = 1;
if (!(mmusr & MMU_R_040)) {
/* clear the invalid atc entry */
__flush_tlb040_one(addr);
errorcode = 0;
}
/* despite what the documentation seems to say, RMW
* accesses always have both the LK and RW bits set */
if (!(ssw & RW_040) || (ssw & LK_040))
errorcode |= 2;
if (do_page_fault(&fp->ptregs, addr, errorcode)) {
#ifdef DEBUG
printk("do_page_fault() !=0\n");
#endif
if (user_mode(&fp->ptregs)){
/* delay writebacks after signal delivery */
#ifdef DEBUG
printk(".. was usermode - return\n");
#endif
return;
}
/* disable writeback into user space from kernel
* (if do_page_fault didn't fix the mapping,
* the writeback won't do any good)
*/
disable_wb:
#ifdef DEBUG
printk(".. disabling wb2\n");
#endif
if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
fp->un.fmt7.wb2s &= ~WBV_040;
if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
fp->un.fmt7.wb3s &= ~WBV_040;
}
} else {
/* In case of a bus error we either kill the process or expect
* the kernel to catch the fault, which then is also responsible
* for cleaning up the mess.
*/
current->thread.signo = SIGBUS;
current->thread.faddr = fp->un.fmt7.faddr;
if (send_fault_sig(&fp->ptregs) >= 0)
printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
fp->un.fmt7.faddr);
goto disable_wb;
}
do_040writebacks(fp);
}
#endif /* CONFIG_M68040 */
#if defined(CONFIG_SUN3)
#include <asm/sun3mmu.h>
extern int mmu_emu_handle_fault (unsigned long, int, int);
/* sun3 version of bus_error030 */
static inline void bus_error030 (struct frame *fp)
{
unsigned char buserr_type = sun3_get_buserr ();
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
#ifdef DEBUG
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
#endif
/*
* Check if this page should be demand-mapped. This needs to go before
* the testing for a bad kernel-space access (demand-mapping applies
* to kernel accesses too).
*/
if ((ssw & DF)
&& (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
return;
}
/* Check for kernel-space pagefault (BAD). */
if (fp->ptregs.sr & PS_S) {
/* kernel fault must be a data fault to user space */
if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
// try checking the kernel mappings before surrender
if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
return;
/* instruction fault or kernel data fault! */
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
fp->ptregs.pc);
if (ssw & DF) {
/* was this fault incurred testing bus mappings? */
if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
(fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
send_fault_sig(&fp->ptregs);
return;
}
printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
}
printk ("BAD KERNEL BUSERR\n");
die_if_kernel("Oops", &fp->ptregs,0);
force_sig(SIGKILL, current);
return;
}
} else {
/* user fault */
if (!(ssw & (FC | FB)) && !(ssw & DF))
/* not an instruction fault or data fault! BAD */
panic ("USER BUSERR w/o instruction or data fault");
}
/* First handle the data fault, if any. */
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
// errorcode bit 0: 0 -> no page 1 -> protection fault
// errorcode bit 1: 0 -> read fault 1 -> write fault
// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
if (buserr_type & SUN3_BUSERR_PROTERR)
errorcode = 0x01;
else if (buserr_type & SUN3_BUSERR_INVALID)
errorcode = 0x00;
else {
#ifdef DEBUG
printk ("*** unexpected busfault type=%#04x\n", buserr_type);
printk ("invalid %s access at %#lx from pc %#lx\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc);
#endif
die_if_kernel ("Oops", &fp->ptregs, buserr_type);
force_sig (SIGBUS, current);
return;
}
//todo: wtf is RM bit? --m
if (!(ssw & RW) || ssw & RM)
errorcode |= 0x02;
/* Handle page fault. */
do_page_fault (&fp->ptregs, addr, errorcode);
/* Retry the data fault now. */
return;
}
/* Now handle the instruction fault. */
/* Get the fault address. */
if (fp->ptregs.format == 0xA)
addr = fp->ptregs.pc + 4;
else
addr = fp->un.fmtb.baddr;
if (ssw & FC)
addr -= 2;
if (buserr_type & SUN3_BUSERR_INVALID) {
if (!mmu_emu_handle_fault (fp->un.fmtb.daddr, 1, 0))
do_page_fault (&fp->ptregs, addr, 0);
} else {
#ifdef DEBUG
printk ("protection fault on insn access (segv).\n");
#endif
force_sig (SIGSEGV, current);
}
}
#else
#if defined(CPU_M68020_OR_M68030)
static inline void bus_error030 (struct frame *fp)
{
volatile unsigned short temp;
unsigned short mmusr;
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
#ifdef DEBUG
unsigned long desc;
printk ("pid = %x ", current->pid);
printk ("SSW=%#06x ", ssw);
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
#endif
/* ++andreas: If a data fault and an instruction fault happen
at the same time map in both pages. */
/* First handle the data fault, if any. */
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
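/* Probe the MMU for the data fault: ptestr walks the translation tree
* (level #7 = search all levels) for addr with the function code taken
* from the SSW, and pmove copies the resulting MMU status into temp. */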
#ifdef DEBUG
asm volatile ("ptestr %3,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
: "a" (&temp), "a" (addr), "d" (ssw));
#else
asm volatile ("ptestr %2,%1@,#7\n\t"
"pmove %%psr,%0@"
: : "a" (&temp), "a" (addr), "d" (ssw));
#endif
mmusr = temp;
#ifdef DEBUG
printk("mmusr is %#x for addr %#lx in task %p\n",
mmusr, addr, current);
printk("descriptor address is %#lx, contents %#lx\n",
__va(desc), *(unsigned long *)__va(desc));
#endif
errorcode = (mmusr & MMU_I) ? 0 : 1;
if (!(ssw & RW) || (ssw & RM))
errorcode |= 2;
if (mmusr & (MMU_I | MMU_WP)) {
if (ssw & 4) {
printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
goto buserr;
}
/* Don't try to do anything further if an exception was
handled. */
if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
return;
} else if (!(mmusr & MMU_I)) {
/* probably a 020 cas fault */
if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
} else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
printk("invalid %s access at %#lx from pc %#lx\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc);
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV, current);
return;
} else {
#if 0
static volatile long tlong;
#endif
printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc, ssw);
asm volatile ("ptestr #1,%1@,#0\n\t"
"pmove %%psr,%0@"
: /* no outputs */
: "a" (&temp), "a" (addr));
mmusr = temp;
printk ("level 0 mmusr is %#x\n", mmusr);
#if 0
asm volatile ("pmove %%tt0,%0@"
: /* no outputs */
: "a" (&tlong));
printk("tt0 is %#lx, ", tlong);
asm volatile ("pmove %%tt1,%0@"
: /* no outputs */
: "a" (&tlong));
printk("tt1 is %#lx\n", tlong);
#endif
#ifdef DEBUG
printk("Unknown SIGSEGV - 1\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV, current);
return;
}
/* setup an ATC entry for the access about to be retried */
if (!(ssw & RW) || (ssw & RM))
asm volatile ("ploadw %1,%0@" : /* no outputs */
: "a" (addr), "d" (ssw));
else
asm volatile ("ploadr %1,%0@" : /* no outputs */
: "a" (addr), "d" (ssw));
}
/* Now handle the instruction fault. */
if (!(ssw & (FC|FB)))
return;
if (fp->ptregs.sr & PS_S) {
printk("Instruction fault at %#010lx\n",
fp->ptregs.pc);
buserr:
printk ("BAD KERNEL BUSERR\n");
die_if_kernel("Oops",&fp->ptregs,0);
force_sig(SIGKILL, current);
return;
}
/* get the fault address */
if (fp->ptregs.format == 10)
addr = fp->ptregs.pc + 4;
else
addr = fp->un.fmtb.baddr;
if (ssw & FC)
addr -= 2;
if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
/* Insn fault on same page as data fault. But we
should still create the ATC entry. */
goto create_atc_entry;
#ifdef DEBUG
asm volatile ("ptestr #1,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
: "a" (&temp), "a" (addr));
#else
asm volatile ("ptestr #1,%1@,#7\n\t"
"pmove %%psr,%0@"
: : "a" (&temp), "a" (addr));
#endif
mmusr = temp;
#ifdef DEBUG
printk ("mmusr is %#x for addr %#lx in task %p\n",
mmusr, addr, current);
printk ("descriptor address is %#lx, contents %#lx\n",
__va(desc), *(unsigned long *)__va(desc));
#endif
if (mmusr & MMU_I)
do_page_fault (&fp->ptregs, addr, 0);
else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
printk ("invalid insn access at %#lx from pc %#lx\n",
addr, fp->ptregs.pc);
#ifdef DEBUG
printk("Unknown SIGSEGV - 2\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV, current);
return;
}
create_atc_entry:
/* setup an ATC entry for the access about to be retried */
asm volatile ("ploadr #2,%0@" : /* no outputs */
: "a" (addr));
}
#endif /* CPU_M68020_OR_M68030 */
#endif /* !CONFIG_SUN3 */
asmlinkage void buserr_c(struct frame *fp)
{
/* Only set esp0 if coming from user mode */
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
#ifdef DEBUG
printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif
switch (fp->ptregs.format) {
#if defined (CONFIG_M68060)
case 4: /* 68060 access error */
access_error060 (fp);
break;
#endif
#if defined (CONFIG_M68040)
case 0x7: /* 68040 access error */
access_error040 (fp);
break;
#endif
#if defined (CPU_M68020_OR_M68030)
case 0xa:
case 0xb:
bus_error030 (fp);
break;
#endif
default:
die_if_kernel("bad frame format",&fp->ptregs,0);
#ifdef DEBUG
printk("Unknown SIGSEGV - 4\n");
#endif
force_sig(SIGSEGV, current);
}
}
static int kstack_depth_to_print = 48;
void show_trace(unsigned long *stack)
{
unsigned long *endstack;
unsigned long addr;
int i;
printk("Call Trace:");
addr = (unsigned long)stack + THREAD_SIZE - 1;
endstack = (unsigned long *)(addr & -THREAD_SIZE);
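/* The kernel stack is THREAD_SIZE aligned, so rounding the stack
* pointer up to the next THREAD_SIZE boundary gives its top. */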
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
* memory, it *may* be the address of a calling
* routine; if so, print it so that someone tracing
* down the cause of the crash will be able to figure
* out the call path that was taken.
*/
if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
if (i % 5 == 0)
printk("\n ");
#endif
printk(" [<%08lx>] %pS\n", addr, (void *)addr);
i++;
}
}
printk("\n");
}
void show_registers(struct pt_regs *regs)
{
struct frame *fp = (struct frame *)regs;
mm_segment_t old_fs = get_fs();
u16 c, *cp;
unsigned long addr;
int i;
print_modules();
printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
regs->d0, regs->d1, regs->d2, regs->d3);
printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
regs->d4, regs->d5, regs->a0, regs->a1);
printk("Process %s (pid: %d, task=%p)\n",
current->comm, task_pid_nr(current), current);
addr = (unsigned long)&fp->un;
printk("Frame format=%X ", regs->format);
switch (regs->format) {
case 0x2:
printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
addr += sizeof(fp->un.fmt2);
break;
case 0x3:
printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
addr += sizeof(fp->un.fmt3);
break;
case 0x4:
printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
: "eff addr=%08lx pc=%08lx\n"),
fp->un.fmt4.effaddr, fp->un.fmt4.pc);
addr += sizeof(fp->un.fmt4);
break;
case 0x7:
printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
printk("push data: %08lx %08lx %08lx %08lx\n",
fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
fp->un.fmt7.pd3);
addr += sizeof(fp->un.fmt7);
break;
case 0x9:
printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
addr += sizeof(fp->un.fmt9);
break;
case 0xa:
printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
fp->un.fmta.daddr, fp->un.fmta.dobuf);
addr += sizeof(fp->un.fmta);
break;
case 0xb:
printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
printk("baddr=%08lx dibuf=%08lx ver=%x\n",
fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
addr += sizeof(fp->un.fmtb);
break;
default:
printk("\n");
}
show_stack(NULL, (unsigned long *)addr);
printk("Code:");
set_fs(KERNEL_DS);
cp = (u16 *)regs->pc;
for (i = -8; i < 16; i++) {
if (get_user(c, cp + i) && i >= 0) {
printk(" Bad PC value.");
break;
}
printk(i ? " %04x" : " <%04x>", c);
}
set_fs(old_fs);
printk ("\n");
}
void show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long *p;
unsigned long *endstack;
int i;
if (!stack) {
if (task)
stack = (unsigned long *)task->thread.esp0;
else
stack = (unsigned long *)&stack;
}
endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
printk("Stack from %08lx:", (unsigned long)stack);
p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
if (p + 1 > endstack)
break;
if (i % 8 == 0)
printk("\n ");
printk(" %08lx", *p++);
}
printk("\n");
show_trace(stack);
}
/*
* The architecture-independent backtrace generator
*/
void dump_stack(void)
{
unsigned long stack;
show_trace(&stack);
}
EXPORT_SYMBOL(dump_stack);
void bad_super_trap (struct frame *fp)
{
console_verbose();
if (fp->ptregs.vector < 4 * ARRAY_SIZE(vec_names))
printk ("*** %s *** FORMAT=%X\n",
vec_names[(fp->ptregs.vector) >> 2],
fp->ptregs.format);
else
printk ("*** Exception %d *** FORMAT=%X\n",
(fp->ptregs.vector) >> 2,
fp->ptregs.format);
if (fp->ptregs.vector >> 2 == VEC_ADDRERR && CPU_IS_020_OR_030) {
unsigned short ssw = fp->un.fmtb.ssw;
printk ("SSW=%#06x ", ssw);
if (ssw & RC)
printk ("Pipe stage C instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
if (ssw & RB)
printk ("Pipe stage B instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr, space_names[ssw & DFC],
fp->ptregs.pc);
}
printk ("Current process id is %d\n", task_pid_nr(current));
die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
}
asmlinkage void trap_c(struct frame *fp)
{
int sig;
siginfo_t info;
if (fp->ptregs.sr & PS_S) {
if (fp->ptregs.vector == VEC_TRACE << 2) {
/* traced a trapping instruction on a 68020/30,
* real exception will be executed afterwards.
*/
} else if (!handle_kernel_fault(&fp->ptregs))
bad_super_trap(fp);
return;
}
/* send the appropriate signal to the user program */
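/* ptregs.vector holds the byte offset into the exception vector table,
* so >> 2 converts it to the vector number. */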
switch ((fp->ptregs.vector) >> 2) {
case VEC_ADDRERR:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
break;
case VEC_ILLEGAL:
case VEC_LINE10:
case VEC_LINE11:
info.si_code = ILL_ILLOPC;
sig = SIGILL;
break;
case VEC_PRIV:
info.si_code = ILL_PRVOPC;
sig = SIGILL;
break;
case VEC_COPROC:
info.si_code = ILL_COPROC;
sig = SIGILL;
break;
case VEC_TRAP1:
case VEC_TRAP2:
case VEC_TRAP3:
case VEC_TRAP4:
case VEC_TRAP5:
case VEC_TRAP6:
case VEC_TRAP7:
case VEC_TRAP8:
case VEC_TRAP9:
case VEC_TRAP10:
case VEC_TRAP11:
case VEC_TRAP12:
case VEC_TRAP13:
case VEC_TRAP14:
info.si_code = ILL_ILLTRP;
sig = SIGILL;
break;
case VEC_FPBRUC:
case VEC_FPOE:
case VEC_FPNAN:
info.si_code = FPE_FLTINV;
sig = SIGFPE;
break;
case VEC_FPIR:
info.si_code = FPE_FLTRES;
sig = SIGFPE;
break;
case VEC_FPDIVZ:
info.si_code = FPE_FLTDIV;
sig = SIGFPE;
break;
case VEC_FPUNDER:
info.si_code = FPE_FLTUND;
sig = SIGFPE;
break;
case VEC_FPOVER:
info.si_code = FPE_FLTOVF;
sig = SIGFPE;
break;
case VEC_ZERODIV:
info.si_code = FPE_INTDIV;
sig = SIGFPE;
break;
case VEC_CHK:
case VEC_TRAP:
info.si_code = FPE_INTOVF;
sig = SIGFPE;
break;
case VEC_TRACE: /* ptrace single step */
info.si_code = TRAP_TRACE;
sig = SIGTRAP;
break;
case VEC_TRAP15: /* breakpoint */
info.si_code = TRAP_BRKPT;
sig = SIGTRAP;
break;
default:
info.si_code = ILL_ILLOPC;
sig = SIGILL;
break;
}
info.si_signo = sig;
info.si_errno = 0;
switch (fp->ptregs.format) {
default:
info.si_addr = (void *) fp->ptregs.pc;
break;
case 2:
info.si_addr = (void *) fp->un.fmt2.iaddr;
break;
case 7:
info.si_addr = (void *) fp->un.fmt7.effaddr;
break;
case 9:
info.si_addr = (void *) fp->un.fmt9.iaddr;
break;
case 10:
info.si_addr = (void *) fp->un.fmta.daddr;
break;
case 11:
info.si_addr = (void *) fp->un.fmtb.daddr;
break;
}
force_sig_info (sig, &info, current);
}
void die_if_kernel (char *str, struct pt_regs *fp, int nr)
{
if (!(fp->sr & PS_S))
return;
console_verbose();
printk("%s: %08x\n",str,nr);
show_registers(fp);
add_taint(TAINT_DIE);
do_exit(SIGSEGV);
}
/*
* This function is called if an error occurs while accessing
* user-space from the fpsp040 code.
*/
asmlinkage void fpsp040_die(void)
{
do_exit(SIGSEGV);
}
#ifdef CONFIG_M68KFPU_EMU
asmlinkage void fpemu_signal(int signal, int code, void *addr)
{
siginfo_t info;
info.si_signo = signal;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
force_sig_info(signal, &info, current);
}
#endif
/*
* linux/arch/m68k/kernel/traps.c
*
* Copyright (C) 1993, 1994 by Hamish Macdonald
*
* 68040 fixes by Michael Rausch
* 68040 fixes by Martin Apel
* 68040 fixes and writeback by Richard Zidlicky
* 68060 fixes by Roman Hodek
* 68060 fixes by Jesper Skov
*
* This file is subject to the terms and conditions of the GNU General Public
* License. See the file COPYING in the main directory of this archive
* for more details.
*/
/*
* Sets up all exception vectors
*/
#include <linux/sched.h>
#include <linux/signal.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/user.h>
#include <linux/string.h>
#include <linux/linkage.h>
#include <linux/init.h>
#include <linux/ptrace.h>
#include <linux/kallsyms.h>
#include <asm/setup.h>
#include <asm/fpu.h>
#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/traps.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/siginfo.h>
/* assembler routines */
asmlinkage void system_call(void);
asmlinkage void buserr(void);
asmlinkage void trap(void);
asmlinkage void nmihandler(void);
#ifdef CONFIG_M68KFPU_EMU
asmlinkage void fpu_emu(void);
#endif
e_vector vectors[256];
/* nmi handler for the Amiga */
asm(".text\n"
__ALIGN_STR "\n"
"nmihandler: rte");
/*
* this must be called very early as the kernel might
* use some instructions that are emulated on the 060,
* so we must be prepared for early probe attempts (e.g. nf_init).
*/
void __init base_trap_init(void)
{
if (MACH_IS_SUN3X) {
extern e_vector *sun3x_prom_vbr;
__asm__ volatile ("movec %%vbr, %0" : "=r" (sun3x_prom_vbr));
}
/* setup the exception vector table */
__asm__ volatile ("movec %0,%%vbr" : : "r" ((void*)vectors));
if (CPU_IS_060) {
/* set up ISP entry points */
asmlinkage void unimp_vec(void) asm ("_060_isp_unimp");
vectors[VEC_UNIMPII] = unimp_vec;
}
vectors[VEC_BUSERR] = buserr;
vectors[VEC_ILLEGAL] = trap;
vectors[VEC_SYS] = system_call;
}
void __init trap_init (void)
{
int i;
for (i = VEC_SPUR; i <= VEC_INT7; i++)
vectors[i] = bad_inthandler;
for (i = 0; i < VEC_USER; i++)
if (!vectors[i])
vectors[i] = trap;
for (i = VEC_USER; i < 256; i++)
vectors[i] = bad_inthandler;
#ifdef CONFIG_M68KFPU_EMU
if (FPU_IS_EMU)
vectors[VEC_LINE11] = fpu_emu;
#endif
if (CPU_IS_040 && !FPU_IS_EMU) {
/* set up FPSP entry points */
asmlinkage void dz_vec(void) asm ("dz");
asmlinkage void inex_vec(void) asm ("inex");
asmlinkage void ovfl_vec(void) asm ("ovfl");
asmlinkage void unfl_vec(void) asm ("unfl");
asmlinkage void snan_vec(void) asm ("snan");
asmlinkage void operr_vec(void) asm ("operr");
asmlinkage void bsun_vec(void) asm ("bsun");
asmlinkage void fline_vec(void) asm ("fline");
asmlinkage void unsupp_vec(void) asm ("unsupp");
vectors[VEC_FPDIVZ] = dz_vec;
vectors[VEC_FPIR] = inex_vec;
vectors[VEC_FPOVER] = ovfl_vec;
vectors[VEC_FPUNDER] = unfl_vec;
vectors[VEC_FPNAN] = snan_vec;
vectors[VEC_FPOE] = operr_vec;
vectors[VEC_FPBRUC] = bsun_vec;
vectors[VEC_LINE11] = fline_vec;
vectors[VEC_FPUNSUP] = unsupp_vec;
}
if (CPU_IS_060 && !FPU_IS_EMU) {
/* set up IFPSP entry points */
asmlinkage void snan_vec6(void) asm ("_060_fpsp_snan");
asmlinkage void operr_vec6(void) asm ("_060_fpsp_operr");
asmlinkage void ovfl_vec6(void) asm ("_060_fpsp_ovfl");
asmlinkage void unfl_vec6(void) asm ("_060_fpsp_unfl");
asmlinkage void dz_vec6(void) asm ("_060_fpsp_dz");
asmlinkage void inex_vec6(void) asm ("_060_fpsp_inex");
asmlinkage void fline_vec6(void) asm ("_060_fpsp_fline");
asmlinkage void unsupp_vec6(void) asm ("_060_fpsp_unsupp");
asmlinkage void effadd_vec6(void) asm ("_060_fpsp_effadd");
vectors[VEC_FPNAN] = snan_vec6;
vectors[VEC_FPOE] = operr_vec6;
vectors[VEC_FPOVER] = ovfl_vec6;
vectors[VEC_FPUNDER] = unfl_vec6;
vectors[VEC_FPDIVZ] = dz_vec6;
vectors[VEC_FPIR] = inex_vec6;
vectors[VEC_LINE11] = fline_vec6;
vectors[VEC_FPUNSUP] = unsupp_vec6;
vectors[VEC_UNIMPEA] = effadd_vec6;
}
/* if running on an amiga, make the NMI interrupt do nothing */
if (MACH_IS_AMIGA) {
vectors[VEC_INT7] = nmihandler;
}
}
static const char *vec_names[] = {
[VEC_RESETSP] = "RESET SP",
[VEC_RESETPC] = "RESET PC",
[VEC_BUSERR] = "BUS ERROR",
[VEC_ADDRERR] = "ADDRESS ERROR",
[VEC_ILLEGAL] = "ILLEGAL INSTRUCTION",
[VEC_ZERODIV] = "ZERO DIVIDE",
[VEC_CHK] = "CHK",
[VEC_TRAP] = "TRAPcc",
[VEC_PRIV] = "PRIVILEGE VIOLATION",
[VEC_TRACE] = "TRACE",
[VEC_LINE10] = "LINE 1010",
[VEC_LINE11] = "LINE 1111",
[VEC_RESV12] = "UNASSIGNED RESERVED 12",
[VEC_COPROC] = "COPROCESSOR PROTOCOL VIOLATION",
[VEC_FORMAT] = "FORMAT ERROR",
[VEC_UNINT] = "UNINITIALIZED INTERRUPT",
[VEC_RESV16] = "UNASSIGNED RESERVED 16",
[VEC_RESV17] = "UNASSIGNED RESERVED 17",
[VEC_RESV18] = "UNASSIGNED RESERVED 18",
[VEC_RESV19] = "UNASSIGNED RESERVED 19",
[VEC_RESV20] = "UNASSIGNED RESERVED 20",
[VEC_RESV21] = "UNASSIGNED RESERVED 21",
[VEC_RESV22] = "UNASSIGNED RESERVED 22",
[VEC_RESV23] = "UNASSIGNED RESERVED 23",
[VEC_SPUR] = "SPURIOUS INTERRUPT",
[VEC_INT1] = "LEVEL 1 INT",
[VEC_INT2] = "LEVEL 2 INT",
[VEC_INT3] = "LEVEL 3 INT",
[VEC_INT4] = "LEVEL 4 INT",
[VEC_INT5] = "LEVEL 5 INT",
[VEC_INT6] = "LEVEL 6 INT",
[VEC_INT7] = "LEVEL 7 INT",
[VEC_SYS] = "SYSCALL",
[VEC_TRAP1] = "TRAP #1",
[VEC_TRAP2] = "TRAP #2",
[VEC_TRAP3] = "TRAP #3",
[VEC_TRAP4] = "TRAP #4",
[VEC_TRAP5] = "TRAP #5",
[VEC_TRAP6] = "TRAP #6",
[VEC_TRAP7] = "TRAP #7",
[VEC_TRAP8] = "TRAP #8",
[VEC_TRAP9] = "TRAP #9",
[VEC_TRAP10] = "TRAP #10",
[VEC_TRAP11] = "TRAP #11",
[VEC_TRAP12] = "TRAP #12",
[VEC_TRAP13] = "TRAP #13",
[VEC_TRAP14] = "TRAP #14",
[VEC_TRAP15] = "TRAP #15",
[VEC_FPBRUC] = "FPCP BSUN",
[VEC_FPIR] = "FPCP INEXACT",
[VEC_FPDIVZ] = "FPCP DIV BY 0",
[VEC_FPUNDER] = "FPCP UNDERFLOW",
[VEC_FPOE] = "FPCP OPERAND ERROR",
[VEC_FPOVER] = "FPCP OVERFLOW",
[VEC_FPNAN] = "FPCP SNAN",
[VEC_FPUNSUP] = "FPCP UNSUPPORTED OPERATION",
[VEC_MMUCFG] = "MMU CONFIGURATION ERROR",
[VEC_MMUILL] = "MMU ILLEGAL OPERATION ERROR",
[VEC_MMUACC] = "MMU ACCESS LEVEL VIOLATION ERROR",
[VEC_RESV59] = "UNASSIGNED RESERVED 59",
[VEC_UNIMPEA] = "UNASSIGNED RESERVED 60",
[VEC_UNIMPII] = "UNASSIGNED RESERVED 61",
[VEC_RESV62] = "UNASSIGNED RESERVED 62",
[VEC_RESV63] = "UNASSIGNED RESERVED 63",
};
static const char *space_names[] = {
[0] = "Space 0",
[USER_DATA] = "User Data",
[USER_PROGRAM] = "User Program",
#ifndef CONFIG_SUN3
[3] = "Space 3",
#else
[FC_CONTROL] = "Control",
#endif
[4] = "Space 4",
[SUPER_DATA] = "Super Data",
[SUPER_PROGRAM] = "Super Program",
[CPU_SPACE] = "CPU"
};
void die_if_kernel(char *,struct pt_regs *,int);
asmlinkage int do_page_fault(struct pt_regs *regs, unsigned long address,
unsigned long error_code);
int send_fault_sig(struct pt_regs *regs);
asmlinkage void trap_c(struct frame *fp);
#if defined (CONFIG_M68060)
static inline void access_error060 (struct frame *fp)
{
unsigned long fslw = fp->un.fmt4.pc; /* is really FSLW for access error */
#ifdef DEBUG
printk("fslw=%#lx, fa=%#lx\n", fslw, fp->un.fmt4.effaddr);
#endif
if (fslw & MMU060_BPE) {
/* branch prediction error -> clear branch cache */
__asm__ __volatile__ ("movec %/cacr,%/d0\n\t"
"orl #0x00400000,%/d0\n\t"
"movec %/d0,%/cacr"
: : : "d0" );
/* return if there's no other error */
if (!(fslw & MMU060_ERR_BITS) && !(fslw & MMU060_SEE))
return;
}
if (fslw & (MMU060_DESC_ERR | MMU060_WP | MMU060_SP)) {
unsigned long errorcode;
unsigned long addr = fp->un.fmt4.effaddr;
if (fslw & MMU060_MA)
addr = (addr + PAGE_SIZE - 1) & PAGE_MASK;
errorcode = 1;
if (fslw & MMU060_DESC_ERR) {
__flush_tlb040_one(addr);
errorcode = 0;
}
if (fslw & MMU060_W)
errorcode |= 2;
#ifdef DEBUG
printk("errorcode = %d\n", errorcode );
#endif
do_page_fault(&fp->ptregs, addr, errorcode);
} else if (fslw & (MMU060_SEE)){
/* Software Emulation Error.
* fault during mem_read/mem_write in ifpsp060/os.S
*/
send_fault_sig(&fp->ptregs);
} else if (!(fslw & (MMU060_RE|MMU060_WE)) ||
send_fault_sig(&fp->ptregs) > 0) {
printk("pc=%#lx, fa=%#lx\n", fp->ptregs.pc, fp->un.fmt4.effaddr);
printk( "68060 access error, fslw=%lx\n", fslw );
trap_c( fp );
}
}
#endif /* CONFIG_M68060 */
#if defined (CONFIG_M68040)
static inline unsigned long probe040(int iswrite, unsigned long addr, int wbs)
{
unsigned long mmusr;
mm_segment_t old_fs = get_fs();
set_fs(MAKE_MM_SEG(wbs));
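/* set_fs() loads SFC/DFC on m68k, so MAKE_MM_SEG(wbs) makes the ptest
* below use the same function code (address space) as the faulting
* access described by the writeback status word. */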
if (iswrite)
asm volatile (".chip 68040; ptestw (%0); .chip 68k" : : "a" (addr));
else
asm volatile (".chip 68040; ptestr (%0); .chip 68k" : : "a" (addr));
asm volatile (".chip 68040; movec %%mmusr,%0; .chip 68k" : "=r" (mmusr));
set_fs(old_fs);
return mmusr;
}
static inline int do_040writeback1(unsigned short wbs, unsigned long wba,
unsigned long wbd)
{
int res = 0;
mm_segment_t old_fs = get_fs();
/* set_fs can not be moved, otherwise put_user() may oops */
set_fs(MAKE_MM_SEG(wbs));
switch (wbs & WBSIZ_040) {
case BA_SIZE_BYTE:
res = put_user(wbd & 0xff, (char __user *)wba);
break;
case BA_SIZE_WORD:
res = put_user(wbd & 0xffff, (short __user *)wba);
break;
case BA_SIZE_LONG:
res = put_user(wbd, (int __user *)wba);
break;
}
/* set_fs can not be moved, otherwise put_user() may oops */
set_fs(old_fs);
#ifdef DEBUG
printk("do_040writeback1, res=%d\n",res);
#endif
return res;
}
/* after an exception in a writeback the stack frame corresponding
* to that exception is discarded, set a few bits in the old frame
* to simulate what it should look like
*/
static inline void fix_xframe040(struct frame *fp, unsigned long wba, unsigned short wbs)
{
fp->un.fmt7.faddr = wba;
fp->un.fmt7.ssw = wbs & 0xff;
if (wba != current->thread.faddr)
fp->un.fmt7.ssw |= MA_040;
}
static inline void do_040writebacks(struct frame *fp)
{
int res = 0;
#if 0
if (fp->un.fmt7.wb1s & WBV_040)
printk("access_error040: cannot handle 1st writeback. oops.\n");
#endif
if ((fp->un.fmt7.wb2s & WBV_040) &&
!(fp->un.fmt7.wb2s & WBTT_040)) {
res = do_040writeback1(fp->un.fmt7.wb2s, fp->un.fmt7.wb2a,
fp->un.fmt7.wb2d);
if (res)
fix_xframe040(fp, fp->un.fmt7.wb2a, fp->un.fmt7.wb2s);
else
fp->un.fmt7.wb2s = 0;
}
/* do the 2nd wb only if the first one was successful (except for a kernel wb) */
if (fp->un.fmt7.wb3s & WBV_040 && (!res || fp->un.fmt7.wb3s & 4)) {
res = do_040writeback1(fp->un.fmt7.wb3s, fp->un.fmt7.wb3a,
fp->un.fmt7.wb3d);
if (res)
{
fix_xframe040(fp, fp->un.fmt7.wb3a, fp->un.fmt7.wb3s);
fp->un.fmt7.wb2s = fp->un.fmt7.wb3s;
fp->un.fmt7.wb3s &= (~WBV_040);
fp->un.fmt7.wb2a = fp->un.fmt7.wb3a;
fp->un.fmt7.wb2d = fp->un.fmt7.wb3d;
}
else
fp->un.fmt7.wb3s = 0;
}
if (res)
send_fault_sig(&fp->ptregs);
}
/*
* Called from sigreturn(); we must make sure user-space code didn't
* manipulate the exception frame to circumvent protection, then
* complete the pending writebacks.
* We just clear TM2 to turn them into user-space accesses.
*/
asmlinkage void berr_040cleanup(struct frame *fp)
{
fp->un.fmt7.wb2s &= ~4;
fp->un.fmt7.wb3s &= ~4;
do_040writebacks(fp);
}
static inline void access_error040(struct frame *fp)
{
unsigned short ssw = fp->un.fmt7.ssw;
unsigned long mmusr;
#ifdef DEBUG
printk("ssw=%#x, fa=%#lx\n", ssw, fp->un.fmt7.faddr);
printk("wb1s=%#x, wb2s=%#x, wb3s=%#x\n", fp->un.fmt7.wb1s,
fp->un.fmt7.wb2s, fp->un.fmt7.wb3s);
printk ("wb2a=%lx, wb3a=%lx, wb2d=%lx, wb3d=%lx\n",
fp->un.fmt7.wb2a, fp->un.fmt7.wb3a,
fp->un.fmt7.wb2d, fp->un.fmt7.wb3d);
#endif
if (ssw & ATC_040) {
unsigned long addr = fp->un.fmt7.faddr;
unsigned long errorcode;
/*
* The MMU status has to be determined AFTER the address
* has been corrected if there was a misaligned access (MA).
*/
if (ssw & MA_040)
addr = (addr + 7) & -8;
/* MMU error, get the MMUSR info for this access */
mmusr = probe040(!(ssw & RW_040), addr, ssw);
#ifdef DEBUG
printk("mmusr = %lx\n", mmusr);
#endif
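/* Build the do_page_fault() error code: bit 0 set means the page is
* present but protected, clear means not present; bit 1 set means a
* write access (same encoding as in the sun3 bus_error030 below). */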
errorcode = 1;
if (!(mmusr & MMU_R_040)) {
/* clear the invalid atc entry */
__flush_tlb040_one(addr);
errorcode = 0;
}
/* despite what the documentation seems to say, RMW
* accesses always have both the LK and RW bits set */
if (!(ssw & RW_040) || (ssw & LK_040))
errorcode |= 2;
if (do_page_fault(&fp->ptregs, addr, errorcode)) {
#ifdef DEBUG
printk("do_page_fault() !=0\n");
#endif
if (user_mode(&fp->ptregs)){
/* delay writebacks after signal delivery */
#ifdef DEBUG
printk(".. was usermode - return\n");
#endif
return;
}
/* disable writeback into user space from kernel
* (if do_page_fault didn't fix the mapping,
* the writeback won't do any good)
*/
disable_wb:
#ifdef DEBUG
printk(".. disabling wb2\n");
#endif
if (fp->un.fmt7.wb2a == fp->un.fmt7.faddr)
fp->un.fmt7.wb2s &= ~WBV_040;
if (fp->un.fmt7.wb3a == fp->un.fmt7.faddr)
fp->un.fmt7.wb3s &= ~WBV_040;
}
} else {
/* In case of a bus error we either kill the process or expect
* the kernel to catch the fault, which then is also responsible
* for cleaning up the mess.
*/
current->thread.signo = SIGBUS;
current->thread.faddr = fp->un.fmt7.faddr;
if (send_fault_sig(&fp->ptregs) >= 0)
printk("68040 bus error (ssw=%x, faddr=%lx)\n", ssw,
fp->un.fmt7.faddr);
goto disable_wb;
}
do_040writebacks(fp);
}
#endif /* CONFIG_M68040 */
#if defined(CONFIG_SUN3)
#include <asm/sun3mmu.h>
extern int mmu_emu_handle_fault (unsigned long, int, int);
/* sun3 version of bus_error030 */
static inline void bus_error030 (struct frame *fp)
{
unsigned char buserr_type = sun3_get_buserr ();
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
extern unsigned long _sun3_map_test_start, _sun3_map_test_end;
#ifdef DEBUG
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
#endif
/*
* Check if this page should be demand-mapped. This needs to go before
* the testing for a bad kernel-space access (demand-mapping applies
* to kernel accesses too).
*/
if ((ssw & DF)
&& (buserr_type & (SUN3_BUSERR_PROTERR | SUN3_BUSERR_INVALID))) {
if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 0))
return;
}
/* Check for kernel-space pagefault (BAD). */
if (fp->ptregs.sr & PS_S) {
/* kernel fault must be a data fault to user space */
if (! ((ssw & DF) && ((ssw & DFC) == USER_DATA))) {
// try checking the kernel mappings before surrender
if (mmu_emu_handle_fault (fp->un.fmtb.daddr, ssw & RW, 1))
return;
/* instruction fault or kernel data fault! */
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
fp->ptregs.pc);
if (ssw & DF) {
/* was this fault incurred testing bus mappings? */
if((fp->ptregs.pc >= (unsigned long)&_sun3_map_test_start) &&
(fp->ptregs.pc <= (unsigned long)&_sun3_map_test_end)) {
send_fault_sig(&fp->ptregs);
return;
}
printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
}
printk ("BAD KERNEL BUSERR\n");
die_if_kernel("Oops", &fp->ptregs,0);
force_sig(SIGKILL, current);
return;
}
} else {
/* user fault */
if (!(ssw & (FC | FB)) && !(ssw & DF))
/* not an instruction fault or data fault! BAD */
panic ("USER BUSERR w/o instruction or data fault");
}
/* First handle the data fault, if any. */
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
// errorcode bit 0: 0 -> no page 1 -> protection fault
// errorcode bit 1: 0 -> read fault 1 -> write fault
// (buserr_type & SUN3_BUSERR_PROTERR) -> protection fault
// (buserr_type & SUN3_BUSERR_INVALID) -> invalid page fault
if (buserr_type & SUN3_BUSERR_PROTERR)
errorcode = 0x01;
else if (buserr_type & SUN3_BUSERR_INVALID)
errorcode = 0x00;
else {
#ifdef DEBUG
printk ("*** unexpected busfault type=%#04x\n", buserr_type);
printk ("invalid %s access at %#lx from pc %#lx\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc);
#endif
die_if_kernel ("Oops", &fp->ptregs, buserr_type);
force_sig (SIGBUS, current);
return;
}
//todo: wtf is RM bit? --m
if (!(ssw & RW) || ssw & RM)
errorcode |= 0x02;
/* Handle page fault. */
do_page_fault (&fp->ptregs, addr, errorcode);
/* Retry the data fault now. */
return;
}
/* Now handle the instruction fault. */
/* Get the fault address. */
if (fp->ptregs.format == 0xA)
addr = fp->ptregs.pc + 4;
else
addr = fp->un.fmtb.baddr;
if (ssw & FC)
addr -= 2;
if (buserr_type & SUN3_BUSERR_INVALID) {
if (!mmu_emu_handle_fault (fp->un.fmtb.daddr, 1, 0))
do_page_fault (&fp->ptregs, addr, 0);
} else {
#ifdef DEBUG
printk ("protection fault on insn access (segv).\n");
#endif
force_sig (SIGSEGV, current);
}
}
#else
#if defined(CPU_M68020_OR_M68030)
static inline void bus_error030 (struct frame *fp)
{
volatile unsigned short temp;
unsigned short mmusr;
unsigned long addr, errorcode;
unsigned short ssw = fp->un.fmtb.ssw;
#ifdef DEBUG
unsigned long desc;
printk ("pid = %x ", current->pid);
printk ("SSW=%#06x ", ssw);
if (ssw & (FC | FB))
printk ("Instruction fault at %#010lx\n",
ssw & FC ?
fp->ptregs.format == 0xa ? fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2
:
fp->ptregs.format == 0xa ? fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
#endif
/* ++andreas: If a data fault and an instruction fault happen
at the same time map in both pages. */
/* First handle the data fault, if any. */
if (ssw & DF) {
addr = fp->un.fmtb.daddr;
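/* Probe the MMU for the data fault: ptestr walks the translation tree
* (level #7 = search all levels) for addr with the function code taken
* from the SSW, and pmove copies the resulting MMU status into temp. */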
#ifdef DEBUG
asm volatile ("ptestr %3,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
: "a" (&temp), "a" (addr), "d" (ssw));
#else
asm volatile ("ptestr %2,%1@,#7\n\t"
"pmove %%psr,%0@"
: : "a" (&temp), "a" (addr), "d" (ssw));
#endif
mmusr = temp;
#ifdef DEBUG
printk("mmusr is %#x for addr %#lx in task %p\n",
mmusr, addr, current);
printk("descriptor address is %#lx, contents %#lx\n",
__va(desc), *(unsigned long *)__va(desc));
#endif
errorcode = (mmusr & MMU_I) ? 0 : 1;
if (!(ssw & RW) || (ssw & RM))
errorcode |= 2;
if (mmusr & (MMU_I | MMU_WP)) {
if (ssw & 4) {
printk("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr,
space_names[ssw & DFC], fp->ptregs.pc);
goto buserr;
}
/* Don't try to do anything further if an exception was
handled. */
if (do_page_fault (&fp->ptregs, addr, errorcode) < 0)
return;
} else if (!(mmusr & MMU_I)) {
/* probably a 020 cas fault */
if (!(ssw & RM) && send_fault_sig(&fp->ptregs) > 0)
printk("unexpected bus error (%#x,%#x)\n", ssw, mmusr);
} else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
printk("invalid %s access at %#lx from pc %#lx\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc);
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV, current);
return;
} else {
#if 0
static volatile long tlong;
#endif
printk("weird %s access at %#lx from pc %#lx (ssw is %#x)\n",
!(ssw & RW) ? "write" : "read", addr,
fp->ptregs.pc, ssw);
asm volatile ("ptestr #1,%1@,#0\n\t"
"pmove %%psr,%0@"
: /* no outputs */
: "a" (&temp), "a" (addr));
mmusr = temp;
printk ("level 0 mmusr is %#x\n", mmusr);
#if 0
asm volatile ("pmove %%tt0,%0@"
: /* no outputs */
: "a" (&tlong));
printk("tt0 is %#lx, ", tlong);
asm volatile ("pmove %%tt1,%0@"
: /* no outputs */
: "a" (&tlong));
printk("tt1 is %#lx\n", tlong);
#endif
#ifdef DEBUG
printk("Unknown SIGSEGV - 1\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV, current);
return;
}
/* setup an ATC entry for the access about to be retried */
if (!(ssw & RW) || (ssw & RM))
asm volatile ("ploadw %1,%0@" : /* no outputs */
: "a" (addr), "d" (ssw));
else
asm volatile ("ploadr %1,%0@" : /* no outputs */
: "a" (addr), "d" (ssw));
}
/* Now handle the instruction fault. */
if (!(ssw & (FC|FB)))
return;
if (fp->ptregs.sr & PS_S) {
printk("Instruction fault at %#010lx\n",
fp->ptregs.pc);
buserr:
printk ("BAD KERNEL BUSERR\n");
die_if_kernel("Oops",&fp->ptregs,0);
force_sig(SIGKILL, current);
return;
}
/* get the fault address */
if (fp->ptregs.format == 10)
addr = fp->ptregs.pc + 4;
else
addr = fp->un.fmtb.baddr;
if (ssw & FC)
addr -= 2;
if ((ssw & DF) && ((addr ^ fp->un.fmtb.daddr) & PAGE_MASK) == 0)
/* Insn fault on same page as data fault. But we
should still create the ATC entry. */
goto create_atc_entry;
#ifdef DEBUG
asm volatile ("ptestr #1,%2@,#7,%0\n\t"
"pmove %%psr,%1@"
: "=a&" (desc)
: "a" (&temp), "a" (addr));
#else
asm volatile ("ptestr #1,%1@,#7\n\t"
"pmove %%psr,%0@"
: : "a" (&temp), "a" (addr));
#endif
mmusr = temp;
#ifdef DEBUG
printk ("mmusr is %#x for addr %#lx in task %p\n",
mmusr, addr, current);
printk ("descriptor address is %#lx, contents %#lx\n",
__va(desc), *(unsigned long *)__va(desc));
#endif
if (mmusr & MMU_I)
do_page_fault (&fp->ptregs, addr, 0);
else if (mmusr & (MMU_B|MMU_L|MMU_S)) {
printk ("invalid insn access at %#lx from pc %#lx\n",
addr, fp->ptregs.pc);
#ifdef DEBUG
printk("Unknown SIGSEGV - 2\n");
#endif
die_if_kernel("Oops",&fp->ptregs,mmusr);
force_sig(SIGSEGV, current);
return;
}
create_atc_entry:
/* setup an ATC entry for the access about to be retried */
asm volatile ("ploadr #2,%0@" : /* no outputs */
: "a" (addr));
}
#endif /* CPU_M68020_OR_M68030 */
#endif /* !CONFIG_SUN3 */
asmlinkage void buserr_c(struct frame *fp)
{
/* Only set esp0 if coming from user mode */
if (user_mode(&fp->ptregs))
current->thread.esp0 = (unsigned long) fp;
#ifdef DEBUG
printk ("*** Bus Error *** Format is %x\n", fp->ptregs.format);
#endif
switch (fp->ptregs.format) {
#if defined (CONFIG_M68060)
case 4: /* 68060 access error */
access_error060 (fp);
break;
#endif
#if defined (CONFIG_M68040)
case 0x7: /* 68040 access error */
access_error040 (fp);
break;
#endif
#if defined (CPU_M68020_OR_M68030)
case 0xa:
case 0xb:
bus_error030 (fp);
break;
#endif
default:
die_if_kernel("bad frame format",&fp->ptregs,0);
#ifdef DEBUG
printk("Unknown SIGSEGV - 4\n");
#endif
force_sig(SIGSEGV, current);
}
}
static int kstack_depth_to_print = 48;
void show_trace(unsigned long *stack)
{
unsigned long *endstack;
unsigned long addr;
int i;
printk("Call Trace:");
addr = (unsigned long)stack + THREAD_SIZE - 1;
endstack = (unsigned long *)(addr & -THREAD_SIZE);
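/* The kernel stack is THREAD_SIZE aligned, so rounding the stack
* pointer up to the next THREAD_SIZE boundary gives its top. */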
i = 0;
while (stack + 1 <= endstack) {
addr = *stack++;
/*
* If the address is either in the text segment of the
* kernel, or in the region which contains vmalloc'ed
* memory, it *may* be the address of a calling
* routine; if so, print it so that someone tracing
* down the cause of the crash will be able to figure
* out the call path that was taken.
*/
if (__kernel_text_address(addr)) {
#ifndef CONFIG_KALLSYMS
if (i % 5 == 0)
printk("\n ");
#endif
printk(" [<%08lx>] %pS\n", addr, (void *)addr);
i++;
}
}
printk("\n");
}
void show_registers(struct pt_regs *regs)
{
struct frame *fp = (struct frame *)regs;
mm_segment_t old_fs = get_fs();
u16 c, *cp;
unsigned long addr;
int i;
print_modules();
printk("PC: [<%08lx>] %pS\n", regs->pc, (void *)regs->pc);
printk("SR: %04x SP: %p a2: %08lx\n", regs->sr, regs, regs->a2);
printk("d0: %08lx d1: %08lx d2: %08lx d3: %08lx\n",
regs->d0, regs->d1, regs->d2, regs->d3);
printk("d4: %08lx d5: %08lx a0: %08lx a1: %08lx\n",
regs->d4, regs->d5, regs->a0, regs->a1);
printk("Process %s (pid: %d, task=%p)\n",
current->comm, task_pid_nr(current), current);
addr = (unsigned long)&fp->un;
printk("Frame format=%X ", regs->format);
switch (regs->format) {
case 0x2:
printk("instr addr=%08lx\n", fp->un.fmt2.iaddr);
addr += sizeof(fp->un.fmt2);
break;
case 0x3:
printk("eff addr=%08lx\n", fp->un.fmt3.effaddr);
addr += sizeof(fp->un.fmt3);
break;
case 0x4:
printk((CPU_IS_060 ? "fault addr=%08lx fslw=%08lx\n"
: "eff addr=%08lx pc=%08lx\n"),
fp->un.fmt4.effaddr, fp->un.fmt4.pc);
addr += sizeof(fp->un.fmt4);
break;
case 0x7:
printk("eff addr=%08lx ssw=%04x faddr=%08lx\n",
fp->un.fmt7.effaddr, fp->un.fmt7.ssw, fp->un.fmt7.faddr);
printk("wb 1 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb1s, fp->un.fmt7.wb1a, fp->un.fmt7.wb1dpd0);
printk("wb 2 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb2s, fp->un.fmt7.wb2a, fp->un.fmt7.wb2d);
printk("wb 3 stat/addr/data: %04x %08lx %08lx\n",
fp->un.fmt7.wb3s, fp->un.fmt7.wb3a, fp->un.fmt7.wb3d);
printk("push data: %08lx %08lx %08lx %08lx\n",
fp->un.fmt7.wb1dpd0, fp->un.fmt7.pd1, fp->un.fmt7.pd2,
fp->un.fmt7.pd3);
addr += sizeof(fp->un.fmt7);
break;
case 0x9:
printk("instr addr=%08lx\n", fp->un.fmt9.iaddr);
addr += sizeof(fp->un.fmt9);
break;
case 0xa:
printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
fp->un.fmta.ssw, fp->un.fmta.isc, fp->un.fmta.isb,
fp->un.fmta.daddr, fp->un.fmta.dobuf);
addr += sizeof(fp->un.fmta);
break;
case 0xb:
printk("ssw=%04x isc=%04x isb=%04x daddr=%08lx dobuf=%08lx\n",
fp->un.fmtb.ssw, fp->un.fmtb.isc, fp->un.fmtb.isb,
fp->un.fmtb.daddr, fp->un.fmtb.dobuf);
printk("baddr=%08lx dibuf=%08lx ver=%x\n",
fp->un.fmtb.baddr, fp->un.fmtb.dibuf, fp->un.fmtb.ver);
addr += sizeof(fp->un.fmtb);
break;
default:
printk("\n");
}
show_stack(NULL, (unsigned long *)addr);
printk("Code:");
set_fs(KERNEL_DS);
cp = (u16 *)regs->pc;
for (i = -8; i < 16; i++) {
if (get_user(c, cp + i) && i >= 0) {
printk(" Bad PC value.");
break;
}
printk(i ? " %04x" : " <%04x>", c);
}
set_fs(old_fs);
printk ("\n");
}
void show_stack(struct task_struct *task, unsigned long *stack)
{
unsigned long *p;
unsigned long *endstack;
int i;
if (!stack) {
if (task)
stack = (unsigned long *)task->thread.esp0;
else
stack = (unsigned long *)&stack;
}
endstack = (unsigned long *)(((unsigned long)stack + THREAD_SIZE - 1) & -THREAD_SIZE);
printk("Stack from %08lx:", (unsigned long)stack);
p = stack;
for (i = 0; i < kstack_depth_to_print; i++) {
if (p + 1 > endstack)
break;
if (i % 8 == 0)
printk("\n ");
printk(" %08lx", *p++);
}
printk("\n");
show_trace(stack);
}
/*
* The architecture-independent backtrace generator
*/
void dump_stack(void)
{
unsigned long stack;
show_trace(&stack);
}
EXPORT_SYMBOL(dump_stack);
void bad_super_trap (struct frame *fp)
{
console_verbose();
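/*
 * ptregs.vector holds the byte offset into the exception vector table,
 * so shifting right by 2 yields the vector number used for the lookup.
 */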
if (fp->ptregs.vector < 4 * ARRAY_SIZE(vec_names))
printk ("*** %s *** FORMAT=%X\n",
vec_names[(fp->ptregs.vector) >> 2],
fp->ptregs.format);
else
printk ("*** Exception %d *** FORMAT=%X\n",
(fp->ptregs.vector) >> 2,
fp->ptregs.format);
if (fp->ptregs.vector >> 2 == VEC_ADDRERR && CPU_IS_020_OR_030) {
unsigned short ssw = fp->un.fmtb.ssw;
printk ("SSW=%#06x ", ssw);
if (ssw & RC)
printk ("Pipe stage C instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 2 : fp->un.fmtb.baddr - 2);
if (ssw & RB)
printk ("Pipe stage B instruction fault at %#010lx\n",
(fp->ptregs.format) == 0xA ?
fp->ptregs.pc + 4 : fp->un.fmtb.baddr);
if (ssw & DF)
printk ("Data %s fault at %#010lx in %s (pc=%#lx)\n",
ssw & RW ? "read" : "write",
fp->un.fmtb.daddr, space_names[ssw & DFC],
fp->ptregs.pc);
}
printk ("Current process id is %d\n", task_pid_nr(current));
die_if_kernel("BAD KERNEL TRAP", &fp->ptregs, 0);
}
asmlinkage void trap_c(struct frame *fp)
{
int sig;
siginfo_t info;
if (fp->ptregs.sr & PS_S) {
if (fp->ptregs.vector == VEC_TRACE << 2) {
/* traced a trapping instruction on a 68020/30,
* real exception will be executed afterwards.
*/
} else if (!handle_kernel_fault(&fp->ptregs))
bad_super_trap(fp);
return;
}
/* send the appropriate signal to the user program */
switch ((fp->ptregs.vector) >> 2) {
case VEC_ADDRERR:
info.si_code = BUS_ADRALN;
sig = SIGBUS;
break;
case VEC_ILLEGAL:
case VEC_LINE10:
case VEC_LINE11:
info.si_code = ILL_ILLOPC;
sig = SIGILL;
break;
case VEC_PRIV:
info.si_code = ILL_PRVOPC;
sig = SIGILL;
break;
case VEC_COPROC:
info.si_code = ILL_COPROC;
sig = SIGILL;
break;
case VEC_TRAP1:
case VEC_TRAP2:
case VEC_TRAP3:
case VEC_TRAP4:
case VEC_TRAP5:
case VEC_TRAP6:
case VEC_TRAP7:
case VEC_TRAP8:
case VEC_TRAP9:
case VEC_TRAP10:
case VEC_TRAP11:
case VEC_TRAP12:
case VEC_TRAP13:
case VEC_TRAP14:
info.si_code = ILL_ILLTRP;
sig = SIGILL;
break;
case VEC_FPBRUC:
case VEC_FPOE:
case VEC_FPNAN:
info.si_code = FPE_FLTINV;
sig = SIGFPE;
break;
case VEC_FPIR:
info.si_code = FPE_FLTRES;
sig = SIGFPE;
break;
case VEC_FPDIVZ:
info.si_code = FPE_FLTDIV;
sig = SIGFPE;
break;
case VEC_FPUNDER:
info.si_code = FPE_FLTUND;
sig = SIGFPE;
break;
case VEC_FPOVER:
info.si_code = FPE_FLTOVF;
sig = SIGFPE;
break;
case VEC_ZERODIV:
info.si_code = FPE_INTDIV;
sig = SIGFPE;
break;
case VEC_CHK:
case VEC_TRAP:
info.si_code = FPE_INTOVF;
sig = SIGFPE;
break;
case VEC_TRACE: /* ptrace single step */
info.si_code = TRAP_TRACE;
sig = SIGTRAP;
break;
case VEC_TRAP15: /* breakpoint */
info.si_code = TRAP_BRKPT;
sig = SIGTRAP;
break;
default:
info.si_code = ILL_ILLOPC;
sig = SIGILL;
break;
}
info.si_signo = sig;
info.si_errno = 0;
switch (fp->ptregs.format) {
default:
info.si_addr = (void *) fp->ptregs.pc;
break;
case 2:
info.si_addr = (void *) fp->un.fmt2.iaddr;
break;
case 7:
info.si_addr = (void *) fp->un.fmt7.effaddr;
break;
case 9:
info.si_addr = (void *) fp->un.fmt9.iaddr;
break;
case 10:
info.si_addr = (void *) fp->un.fmta.daddr;
break;
case 11:
info.si_addr = (void *) fp->un.fmtb.daddr;
break;
}
force_sig_info (sig, &info, current);
}
void die_if_kernel (char *str, struct pt_regs *fp, int nr)
{
if (!(fp->sr & PS_S))
return;
console_verbose();
printk("%s: %08x\n",str,nr);
show_registers(fp);
add_taint(TAINT_DIE);
do_exit(SIGSEGV);
}
/*
* This function is called if an error occur while accessing
* user-space from the fpsp040 code.
*/
asmlinkage void fpsp040_die(void)
{
do_exit(SIGSEGV);
}
#ifdef CONFIG_M68KFPU_EMU
asmlinkage void fpemu_signal(int signal, int code, void *addr)
{
siginfo_t info;
info.si_signo = signal;
info.si_errno = 0;
info.si_code = code;
info.si_addr = addr;
force_sig_info(signal, &info, current);
}
#endif
#ifdef CONFIG_MMU
#include "vmlinux.lds_mm.S"
#else
#include "vmlinux.lds_no.S"
#endif
PHDRS
{
text PT_LOAD FILEHDR PHDRS FLAGS (7);
data PT_LOAD FLAGS (7);
}
#ifdef CONFIG_SUN3
#include "vmlinux-sun3.lds"
#else
#include "vmlinux-std.lds"
#endif
ifdef CONFIG_MMU
include arch/m68k/lib/Makefile_mm
else
include arch/m68k/lib/Makefile_no
endif
#
# Makefile for m68k-specific library files..
#
lib-y := ashldi3.o ashrdi3.o lshrdi3.o muldi3.o \
checksum.o string.o uaccess.o
#ifdef CONFIG_MMU
#include "checksum_mm.c"
#else
#include "checksum_no.c"
#endif
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
* Fixed some nasty bugs, causing some horrible crashes.
* A: At some points, the sum (%0) was used as
* length-counter instead of the length counter
* (%1). Thanks to Roman Hodek for pointing this out.
* B: GCC seems to mess up if one uses too many
* data-registers to hold input values and one tries to
* specify d0 and d1 as scratch registers. Letting gcc
* choose these registers itself solves the problem.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* 1998/8/31 Andreas Schwab:
* Zero out rest of buffer on exception in
* csum_partial_copy_from_user.
*/
#include <linux/module.h>
#include <net/checksum.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned long tmp1, tmp2;
/*
* Experiments with ethernet and slip connections show that buff
* is aligned on either a 2-byte or 4-byte boundary.
*/
__asm__("movel %2,%3\n\t"
"btst #1,%3\n\t" /* Check alignment */
"jeq 2f\n\t"
"subql #2,%1\n\t" /* buff%4==2: treat first word */
"jgt 1f\n\t"
"addql #2,%1\n\t" /* len was == 2, treat only rest */
"jra 4f\n"
"1:\t"
"addw %2@+,%0\n\t" /* add first word to sum */
"clrl %3\n\t"
"addxl %3,%0\n" /* add X bit */
"2:\t"
/* unrolled loop for the main part: do 8 longs at once */
"movel %1,%3\n\t" /* save len in tmp1 */
"lsrl #5,%1\n\t" /* len/32 */
"jeq 2f\n\t" /* not enough... */
"subql #1,%1\n"
"1:\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"dbra %1,1b\n\t"
"clrl %4\n\t"
"addxl %4,%0\n\t" /* add X bit */
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 1b\n"
"2:\t"
"movel %3,%1\n\t" /* restore len from tmp1 */
"andw #0x1c,%3\n\t" /* number of rest longs */
"jeq 4f\n\t"
"lsrw #2,%3\n\t"
"subqw #1,%3\n"
"3:\t"
/* loop for rest longs */
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"dbra %3,3b\n\t"
"clrl %4\n\t"
"addxl %4,%0\n" /* add X bit */
"4:\t"
/* now check for rest bytes that do not fit into longs */
"andw #3,%1\n\t"
"jeq 7f\n\t"
"clrl %4\n\t" /* clear tmp2 for rest bytes */
"subqw #2,%1\n\t"
"jlt 5f\n\t"
"movew %2@+,%4\n\t" /* have rest >= 2: get word */
"swap %4\n\t" /* into bits 16..31 */
"tstw %1\n\t" /* another byte? */
"jeq 6f\n"
"5:\t"
"moveb %2@,%4\n\t" /* have odd rest: get byte */
"lslw #8,%4\n\t" /* into bits 8..15; 16..31 untouched */
"6:\t"
"addl %4,%0\n\t" /* now add rest long to sum */
"clrl %4\n\t"
"addxl %4,%0\n" /* add X bit */
"7:\t"
: "=d" (sum), "=d" (len), "=a" (buff),
"=&d" (tmp1), "=&d" (tmp2)
: "0" (sum), "1" (len), "2" (buff)
);
return(sum);
}
EXPORT_SYMBOL(csum_partial);
/*
* copy from user space while checksumming, with exception handling.
*/
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err)
{
/*
* GCC doesn't like more than 10 operands for the asm
* statements so we have to use tmp2 for the error
* code.
*/
unsigned long tmp1, tmp2;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
"jeq 2f\n\t"
"subql #2,%1\n\t" /* buff%4==2: treat first word */
"jgt 1f\n\t"
"addql #2,%1\n\t" /* len was == 2, treat only rest */
"jra 4f\n"
"1:\n"
"10:\t"
"movesw %2@+,%4\n\t" /* add first word to sum */
"addw %4,%0\n\t"
"movew %4,%3@+\n\t"
"clrl %4\n\t"
"addxl %4,%0\n" /* add X bit */
"2:\t"
/* unrolled loop for the main part: do 8 longs at once */
"movel %1,%4\n\t" /* save len in tmp1 */
"lsrl #5,%1\n\t" /* len/32 */
"jeq 2f\n\t" /* not enough... */
"subql #1,%1\n"
"1:\n"
"11:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"12:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"13:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"14:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"15:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"16:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"17:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"18:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"dbra %1,1b\n\t"
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 1b\n"
"2:\t"
"movel %4,%1\n\t" /* restore len from tmp1 */
"andw #0x1c,%4\n\t" /* number of rest longs */
"jeq 4f\n\t"
"lsrw #2,%4\n\t"
"subqw #1,%4\n"
"3:\n"
/* loop for rest longs */
"19:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"dbra %4,3b\n\t"
"clrl %5\n\t"
"addxl %5,%0\n" /* add X bit */
"4:\t"
/* now check for rest bytes that do not fit into longs */
"andw #3,%1\n\t"
"jeq 7f\n\t"
"clrl %5\n\t" /* clear tmp2 for rest bytes */
"subqw #2,%1\n\t"
"jlt 5f\n\t"
"20:\t"
"movesw %2@+,%5\n\t" /* have rest >= 2: get word */
"movew %5,%3@+\n\t"
"swap %5\n\t" /* into bits 16..31 */
"tstw %1\n\t" /* another byte? */
"jeq 6f\n"
"5:\n"
"21:\t"
"movesb %2@,%5\n\t" /* have odd rest: get byte */
"moveb %5,%3@+\n\t"
"lslw #8,%5\n\t" /* into bits 8..15; 16..31 untouched */
"6:\t"
"addl %5,%0\n\t" /* now add rest long to sum */
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"7:\t"
"clrl %5\n" /* no error - clear return value */
"8:\n"
".section .fixup,\"ax\"\n"
".even\n"
/* If any exception occurs zero out the rest.
Similarities with the code above are intentional :-) */
"90:\t"
"clrw %3@+\n\t"
"movel %1,%4\n\t"
"lsrl #5,%1\n\t"
"jeq 1f\n\t"
"subql #1,%1\n"
"91:\t"
"clrl %3@+\n"
"92:\t"
"clrl %3@+\n"
"93:\t"
"clrl %3@+\n"
"94:\t"
"clrl %3@+\n"
"95:\t"
"clrl %3@+\n"
"96:\t"
"clrl %3@+\n"
"97:\t"
"clrl %3@+\n"
"98:\t"
"clrl %3@+\n\t"
"dbra %1,91b\n\t"
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 91b\n"
"1:\t"
"movel %4,%1\n\t"
"andw #0x1c,%4\n\t"
"jeq 1f\n\t"
"lsrw #2,%4\n\t"
"subqw #1,%4\n"
"99:\t"
"clrl %3@+\n\t"
"dbra %4,99b\n\t"
"1:\t"
"andw #3,%1\n\t"
"jeq 9f\n"
"100:\t"
"clrw %3@+\n\t"
"tstw %1\n\t"
"jeq 9f\n"
"101:\t"
"clrb %3@+\n"
"9:\t"
#define STR(X) STR1(X)
#define STR1(X) #X
"moveq #-" STR(EFAULT) ",%5\n\t"
"jra 8b\n"
".previous\n"
".section __ex_table,\"a\"\n"
".long 10b,90b\n"
".long 11b,91b\n"
".long 12b,92b\n"
".long 13b,93b\n"
".long 14b,94b\n"
".long 15b,95b\n"
".long 16b,96b\n"
".long 17b,97b\n"
".long 18b,98b\n"
".long 19b,99b\n"
".long 20b,100b\n"
".long 21b,101b\n"
".previous"
: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
"=&d" (tmp1), "=d" (tmp2)
: "0" (sum), "1" (len), "2" (src), "3" (dst)
);
*csum_err = tmp2;
return(sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
/*
* copy from kernel space while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
unsigned long tmp1, tmp2;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
"jeq 2f\n\t"
"subql #2,%1\n\t" /* buff%4==2: treat first word */
"jgt 1f\n\t"
"addql #2,%1\n\t" /* len was == 2, treat only rest */
"jra 4f\n"
"1:\t"
"movew %2@+,%4\n\t" /* add first word to sum */
"addw %4,%0\n\t"
"movew %4,%3@+\n\t"
"clrl %4\n\t"
"addxl %4,%0\n" /* add X bit */
"2:\t"
/* unrolled loop for the main part: do 8 longs at once */
"movel %1,%4\n\t" /* save len in tmp1 */
"lsrl #5,%1\n\t" /* len/32 */
"jeq 2f\n\t" /* not enough... */
"subql #1,%1\n"
"1:\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"dbra %1,1b\n\t"
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 1b\n"
"2:\t"
"movel %4,%1\n\t" /* restore len from tmp1 */
"andw #0x1c,%4\n\t" /* number of rest longs */
"jeq 4f\n\t"
"lsrw #2,%4\n\t"
"subqw #1,%4\n"
"3:\t"
/* loop for rest longs */
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"dbra %4,3b\n\t"
"clrl %5\n\t"
"addxl %5,%0\n" /* add X bit */
"4:\t"
/* now check for rest bytes that do not fit into longs */
"andw #3,%1\n\t"
"jeq 7f\n\t"
"clrl %5\n\t" /* clear tmp2 for rest bytes */
"subqw #2,%1\n\t"
"jlt 5f\n\t"
"movew %2@+,%5\n\t" /* have rest >= 2: get word */
"movew %5,%3@+\n\t"
"swap %5\n\t" /* into bits 16..31 */
"tstw %1\n\t" /* another byte? */
"jeq 6f\n"
"5:\t"
"moveb %2@,%5\n\t" /* have odd rest: get byte */
"moveb %5,%3@+\n\t"
"lslw #8,%5\n" /* into bits 8..15; 16..31 untouched */
"6:\t"
"addl %5,%0\n\t" /* now add rest long to sum */
"clrl %5\n\t"
"addxl %5,%0\n" /* add X bit */
"7:\t"
: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
"=&d" (tmp1), "=&d" (tmp2)
: "0" (sum), "1" (len), "2" (src), "3" (dst)
);
return(sum);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
/*
* INET An implementation of the TCP/IP protocol suite for the LINUX
* operating system. INET is implemented using the BSD Socket
* interface as the means of communication with the user level.
*
* IP/TCP/UDP checksumming routines
*
* Authors: Jorge Cwik, <jorge@laser.satlink.net>
* Arnt Gulbrandsen, <agulbra@nvg.unit.no>
* Tom May, <ftom@netcom.com>
* Andreas Schwab, <schwab@issan.informatik.uni-dortmund.de>
* Lots of code moved from tcp.c and ip.c; see those files
* for more names.
*
* 03/02/96 Jes Sorensen, Andreas Schwab, Roman Hodek:
* Fixed some nasty bugs, causing some horrible crashes.
* A: At some points, the sum (%0) was used as
* length-counter instead of the length counter
* (%1). Thanks to Roman Hodek for pointing this out.
* B: GCC seems to mess up if one uses too many
* data-registers to hold input values and one tries to
* specify d0 and d1 as scratch registers. Letting gcc
* choose these registers itself solves the problem.
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version
* 2 of the License, or (at your option) any later version.
*
* 1998/8/31 Andreas Schwab:
* Zero out rest of buffer on exception in
* csum_partial_copy_from_user.
*/
#include <linux/module.h>
#include <net/checksum.h>
/*
* computes a partial checksum, e.g. for TCP/UDP fragments
*/
__wsum csum_partial(const void *buff, int len, __wsum sum)
{
unsigned long tmp1, tmp2;
/*
* Experiments with ethernet and slip connections show that buff
* is aligned on either a 2-byte or 4-byte boundary.
*/
__asm__("movel %2,%3\n\t"
"btst #1,%3\n\t" /* Check alignment */
"jeq 2f\n\t"
"subql #2,%1\n\t" /* buff%4==2: treat first word */
"jgt 1f\n\t"
"addql #2,%1\n\t" /* len was == 2, treat only rest */
"jra 4f\n"
"1:\t"
"addw %2@+,%0\n\t" /* add first word to sum */
"clrl %3\n\t"
"addxl %3,%0\n" /* add X bit */
"2:\t"
/* unrolled loop for the main part: do 8 longs at once */
"movel %1,%3\n\t" /* save len in tmp1 */
"lsrl #5,%1\n\t" /* len/32 */
"jeq 2f\n\t" /* not enough... */
"subql #1,%1\n"
"1:\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"dbra %1,1b\n\t"
"clrl %4\n\t"
"addxl %4,%0\n\t" /* add X bit */
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 1b\n"
"2:\t"
"movel %3,%1\n\t" /* restore len from tmp1 */
"andw #0x1c,%3\n\t" /* number of rest longs */
"jeq 4f\n\t"
"lsrw #2,%3\n\t"
"subqw #1,%3\n"
"3:\t"
/* loop for rest longs */
"movel %2@+,%4\n\t"
"addxl %4,%0\n\t"
"dbra %3,3b\n\t"
"clrl %4\n\t"
"addxl %4,%0\n" /* add X bit */
"4:\t"
/* now check for rest bytes that do not fit into longs */
"andw #3,%1\n\t"
"jeq 7f\n\t"
"clrl %4\n\t" /* clear tmp2 for rest bytes */
"subqw #2,%1\n\t"
"jlt 5f\n\t"
"movew %2@+,%4\n\t" /* have rest >= 2: get word */
"swap %4\n\t" /* into bits 16..31 */
"tstw %1\n\t" /* another byte? */
"jeq 6f\n"
"5:\t"
"moveb %2@,%4\n\t" /* have odd rest: get byte */
"lslw #8,%4\n\t" /* into bits 8..15; 16..31 untouched */
"6:\t"
"addl %4,%0\n\t" /* now add rest long to sum */
"clrl %4\n\t"
"addxl %4,%0\n" /* add X bit */
"7:\t"
: "=d" (sum), "=d" (len), "=a" (buff),
"=&d" (tmp1), "=&d" (tmp2)
: "0" (sum), "1" (len), "2" (buff)
);
return(sum);
}
EXPORT_SYMBOL(csum_partial);
/*
* copy from user space while checksumming, with exception handling.
*/
__wsum
csum_partial_copy_from_user(const void __user *src, void *dst,
int len, __wsum sum, int *csum_err)
{
/*
* GCC doesn't like more than 10 operands for the asm
* statements so we have to use tmp2 for the error
* code.
*/
unsigned long tmp1, tmp2;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
"jeq 2f\n\t"
"subql #2,%1\n\t" /* buff%4==2: treat first word */
"jgt 1f\n\t"
"addql #2,%1\n\t" /* len was == 2, treat only rest */
"jra 4f\n"
"1:\n"
"10:\t"
"movesw %2@+,%4\n\t" /* add first word to sum */
"addw %4,%0\n\t"
"movew %4,%3@+\n\t"
"clrl %4\n\t"
"addxl %4,%0\n" /* add X bit */
"2:\t"
/* unrolled loop for the main part: do 8 longs at once */
"movel %1,%4\n\t" /* save len in tmp1 */
"lsrl #5,%1\n\t" /* len/32 */
"jeq 2f\n\t" /* not enough... */
"subql #1,%1\n"
"1:\n"
"11:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"12:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"13:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"14:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"15:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"16:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"17:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"18:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"dbra %1,1b\n\t"
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 1b\n"
"2:\t"
"movel %4,%1\n\t" /* restore len from tmp1 */
"andw #0x1c,%4\n\t" /* number of rest longs */
"jeq 4f\n\t"
"lsrw #2,%4\n\t"
"subqw #1,%4\n"
"3:\n"
/* loop for rest longs */
"19:\t"
"movesl %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"dbra %4,3b\n\t"
"clrl %5\n\t"
"addxl %5,%0\n" /* add X bit */
"4:\t"
/* now check for rest bytes that do not fit into longs */
"andw #3,%1\n\t"
"jeq 7f\n\t"
"clrl %5\n\t" /* clear tmp2 for rest bytes */
"subqw #2,%1\n\t"
"jlt 5f\n\t"
"20:\t"
"movesw %2@+,%5\n\t" /* have rest >= 2: get word */
"movew %5,%3@+\n\t"
"swap %5\n\t" /* into bits 16..31 */
"tstw %1\n\t" /* another byte? */
"jeq 6f\n"
"5:\n"
"21:\t"
"movesb %2@,%5\n\t" /* have odd rest: get byte */
"moveb %5,%3@+\n\t"
"lslw #8,%5\n\t" /* into bits 8..15; 16..31 untouched */
"6:\t"
"addl %5,%0\n\t" /* now add rest long to sum */
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"7:\t"
"clrl %5\n" /* no error - clear return value */
"8:\n"
".section .fixup,\"ax\"\n"
".even\n"
/* If any exception occurs zero out the rest.
Similarities with the code above are intentional :-) */
"90:\t"
"clrw %3@+\n\t"
"movel %1,%4\n\t"
"lsrl #5,%1\n\t"
"jeq 1f\n\t"
"subql #1,%1\n"
"91:\t"
"clrl %3@+\n"
"92:\t"
"clrl %3@+\n"
"93:\t"
"clrl %3@+\n"
"94:\t"
"clrl %3@+\n"
"95:\t"
"clrl %3@+\n"
"96:\t"
"clrl %3@+\n"
"97:\t"
"clrl %3@+\n"
"98:\t"
"clrl %3@+\n\t"
"dbra %1,91b\n\t"
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 91b\n"
"1:\t"
"movel %4,%1\n\t"
"andw #0x1c,%4\n\t"
"jeq 1f\n\t"
"lsrw #2,%4\n\t"
"subqw #1,%4\n"
"99:\t"
"clrl %3@+\n\t"
"dbra %4,99b\n\t"
"1:\t"
"andw #3,%1\n\t"
"jeq 9f\n"
"100:\t"
"clrw %3@+\n\t"
"tstw %1\n\t"
"jeq 9f\n"
"101:\t"
"clrb %3@+\n"
"9:\t"
#define STR(X) STR1(X)
#define STR1(X) #X
"moveq #-" STR(EFAULT) ",%5\n\t"
"jra 8b\n"
".previous\n"
".section __ex_table,\"a\"\n"
".long 10b,90b\n"
".long 11b,91b\n"
".long 12b,92b\n"
".long 13b,93b\n"
".long 14b,94b\n"
".long 15b,95b\n"
".long 16b,96b\n"
".long 17b,97b\n"
".long 18b,98b\n"
".long 19b,99b\n"
".long 20b,100b\n"
".long 21b,101b\n"
".previous"
: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
"=&d" (tmp1), "=d" (tmp2)
: "0" (sum), "1" (len), "2" (src), "3" (dst)
);
*csum_err = tmp2;
return(sum);
}
EXPORT_SYMBOL(csum_partial_copy_from_user);
/*
* copy from kernel space while checksumming, otherwise like csum_partial
*/
__wsum
csum_partial_copy_nocheck(const void *src, void *dst, int len, __wsum sum)
{
unsigned long tmp1, tmp2;
__asm__("movel %2,%4\n\t"
"btst #1,%4\n\t" /* Check alignment */
"jeq 2f\n\t"
"subql #2,%1\n\t" /* buff%4==2: treat first word */
"jgt 1f\n\t"
"addql #2,%1\n\t" /* len was == 2, treat only rest */
"jra 4f\n"
"1:\t"
"movew %2@+,%4\n\t" /* add first word to sum */
"addw %4,%0\n\t"
"movew %4,%3@+\n\t"
"clrl %4\n\t"
"addxl %4,%0\n" /* add X bit */
"2:\t"
/* unrolled loop for the main part: do 8 longs at once */
"movel %1,%4\n\t" /* save len in tmp1 */
"lsrl #5,%1\n\t" /* len/32 */
"jeq 2f\n\t" /* not enough... */
"subql #1,%1\n"
"1:\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"dbra %1,1b\n\t"
"clrl %5\n\t"
"addxl %5,%0\n\t" /* add X bit */
"clrw %1\n\t"
"subql #1,%1\n\t"
"jcc 1b\n"
"2:\t"
"movel %4,%1\n\t" /* restore len from tmp1 */
"andw #0x1c,%4\n\t" /* number of rest longs */
"jeq 4f\n\t"
"lsrw #2,%4\n\t"
"subqw #1,%4\n"
"3:\t"
/* loop for rest longs */
"movel %2@+,%5\n\t"
"addxl %5,%0\n\t"
"movel %5,%3@+\n\t"
"dbra %4,3b\n\t"
"clrl %5\n\t"
"addxl %5,%0\n" /* add X bit */
"4:\t"
/* now check for rest bytes that do not fit into longs */
"andw #3,%1\n\t"
"jeq 7f\n\t"
"clrl %5\n\t" /* clear tmp2 for rest bytes */
"subqw #2,%1\n\t"
"jlt 5f\n\t"
"movew %2@+,%5\n\t" /* have rest >= 2: get word */
"movew %5,%3@+\n\t"
"swap %5\n\t" /* into bits 16..31 */
"tstw %1\n\t" /* another byte? */
"jeq 6f\n"
"5:\t"
"moveb %2@,%5\n\t" /* have odd rest: get byte */
"moveb %5,%3@+\n\t"
"lslw #8,%5\n" /* into bits 8..15; 16..31 untouched */
"6:\t"
"addl %5,%0\n\t" /* now add rest long to sum */
"clrl %5\n\t"
"addxl %5,%0\n" /* add X bit */
"7:\t"
: "=d" (sum), "=d" (len), "=a" (src), "=a" (dst),
"=&d" (tmp1), "=&d" (tmp2)
: "0" (sum), "1" (len), "2" (src), "3" (dst)
);
return(sum);
}
EXPORT_SYMBOL(csum_partial_copy_nocheck);
#ifdef CONFIG_MMU
#include "muldi3_mm.c"
#else
#include "muldi3_no.c"
#endif
/* muldi3.c extracted from gcc-2.7.2.3/libgcc2.c and
gcc-2.7.2.3/longlong.h which is: */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#define BITS_PER_UNIT 8
#define umul_ppmm(w1, w0, u, v) \
__asm__ ("mulu%.l %3,%1:%0" \
: "=d" ((USItype)(w0)), \
"=d" ((USItype)(w1)) \
: "%0" ((USItype)(u)), \
"dmi" ((USItype)(v)))
#define __umulsidi3(u, v) \
({DIunion __w; \
umul_ppmm (__w.s.high, __w.s.low, u, v); \
__w.ll; })
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));
typedef int word_type __attribute__ ((mode (__word__)));
struct DIstruct {SItype high, low;};
typedef union
{
struct DIstruct s;
DItype ll;
} DIunion;
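/*
 * 64-bit multiply built from 32-bit operations: mulu.l produces the
 * full 64-bit product of the two low words, then the cross products
 * (low*high and high*low) are added into the upper 32 bits.
 */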
DItype
__muldi3 (DItype u, DItype v)
{
DIunion w;
DIunion uu, vv;
uu.ll = u,
vv.ll = v;
w.ll = __umulsidi3 (uu.s.low, vv.s.low);
w.s.high += ((USItype) uu.s.low * (USItype) vv.s.high
+ (USItype) uu.s.high * (USItype) vv.s.low);
return w.ll;
}
/* ashrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));
typedef int word_type __attribute__ ((mode (__word__)));
struct DIstruct {SItype high, low;};
typedef union
{
struct DIstruct s;
DItype ll;
} DIunion;
DItype
__ashrdi3 (DItype u, word_type b)
{
DIunion w;
word_type bm;
DIunion uu;
if (b == 0)
return u;
uu.ll = u;
bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
if (bm <= 0)
{
/* w.s.high = 1..1 or 0..0 */
w.s.high = uu.s.high >> (sizeof (SItype) * BITS_PER_UNIT - 1);
w.s.low = uu.s.high >> -bm;
}
else
{
USItype carries = (USItype)uu.s.high << bm;
w.s.high = uu.s.high >> b;
w.s.low = ((USItype)uu.s.low >> b) | carries;
}
return w.ll;
}
ifdef CONFIG_MMU
include arch/m68k/mm/Makefile_mm
else
include arch/m68k/mm/Makefile_no
endif
#
# Makefile for the linux m68k-specific parts of the memory manager.
#
obj-y := cache.o init.o fault.o hwtest.o
obj-$(CONFIG_MMU_MOTOROLA) += kmap.o memory.o motorola.o
obj-$(CONFIG_MMU_SUN3) += sun3kmap.o sun3mmu.o
#ifdef CONFIG_MMU
#include "init_mm.c"
#else
#include "init_no.c"
#endif
/*
* linux/arch/m68k/mm/init.c
*
* Copyright (C) 1995 Hamish Macdonald
*
* Contains common initialization routines, specific init code moved
* to motorola.c and sun3mmu.c
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
#include <asm/tlb.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pg_data_t pg_data_map[MAX_NUMNODES];
EXPORT_SYMBOL(pg_data_map);
int m68k_virt_to_node_shift;
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
pg_data_t *pg_data_table[65];
EXPORT_SYMBOL(pg_data_table);
#endif
void __init m68k_setup_node(int node)
{
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
struct mem_info *info = m68k_memory + node;
int i, end;
i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
for (; i <= end; i++) {
if (pg_data_table[i])
printk("overlap at %u for chunk %u\n", i, node);
pg_data_table[i] = pg_data_map + node;
}
#endif
pg_data_map[node].bdata = bootmem_node_data + node;
node_set_online(node);
}
/*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
void *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
extern void init_pointer_table(unsigned long ptable);
/* References to section boundaries */
extern pmd_t *zero_pgtable;
void __init mem_init(void)
{
pg_data_t *pgdat;
int codepages = 0;
int datapages = 0;
int initpages = 0;
int i;
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
atari_stram_mem_init_hook();
#endif
/* this will put all memory onto the freelists */
totalram_pages = num_physpages = 0;
for_each_online_pgdat(pgdat) {
num_physpages += pgdat->node_present_pages;
totalram_pages += free_all_bootmem_node(pgdat);
for (i = 0; i < pgdat->node_spanned_pages; i++) {
struct page *page = pgdat->node_mem_map + i;
char *addr = page_to_virt(page);
if (!PageReserved(page))
continue;
if (addr >= _text &&
addr < _etext)
codepages++;
else if (addr >= __init_begin &&
addr < __init_end)
initpages++;
else
datapages++;
}
}
#ifndef CONFIG_SUN3
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
if (pgd_present(kernel_pg_dir[i]))
init_pointer_table(__pgd_page(kernel_pg_dir[i]));
}
/* insert also pointer table that we used to unmap the zero page */
if (zero_pgtable)
init_pointer_table((unsigned long)zero_pgtable);
#endif
printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
totalram_pages << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10));
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
int pages = 0;
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
pages++;
}
printk ("Freeing initrd memory: %dk freed\n", pages);
}
#endif
/*
* linux/arch/m68k/mm/init.c
*
* Copyright (C) 1995 Hamish Macdonald
*
* Contains common initialization routines, specific init code moved
* to motorola.c and sun3mmu.c
*/
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/gfp.h>
#include <asm/setup.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/system.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>
#include <asm/tlb.h>
DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
pg_data_t pg_data_map[MAX_NUMNODES];
EXPORT_SYMBOL(pg_data_map);
int m68k_virt_to_node_shift;
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
pg_data_t *pg_data_table[65];
EXPORT_SYMBOL(pg_data_table);
#endif
void __init m68k_setup_node(int node)
{
#ifndef CONFIG_SINGLE_MEMORY_CHUNK
struct mem_info *info = m68k_memory + node;
int i, end;
i = (unsigned long)phys_to_virt(info->addr) >> __virt_to_node_shift();
end = (unsigned long)phys_to_virt(info->addr + info->size - 1) >> __virt_to_node_shift();
for (; i <= end; i++) {
if (pg_data_table[i])
printk("overlap at %u for chunk %u\n", i, node);
pg_data_table[i] = pg_data_map + node;
}
#endif
pg_data_map[node].bdata = bootmem_node_data + node;
node_set_online(node);
}
/*
* ZERO_PAGE is a special page that is used for zero-initialized
* data and COW.
*/
void *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);
extern void init_pointer_table(unsigned long ptable);
/* References to section boundaries */
extern pmd_t *zero_pgtable;
void __init mem_init(void)
{
pg_data_t *pgdat;
int codepages = 0;
int datapages = 0;
int initpages = 0;
int i;
#ifdef CONFIG_ATARI
if (MACH_IS_ATARI)
atari_stram_mem_init_hook();
#endif
/* this will put all memory onto the freelists */
totalram_pages = num_physpages = 0;
for_each_online_pgdat(pgdat) {
num_physpages += pgdat->node_present_pages;
totalram_pages += free_all_bootmem_node(pgdat);
for (i = 0; i < pgdat->node_spanned_pages; i++) {
struct page *page = pgdat->node_mem_map + i;
char *addr = page_to_virt(page);
if (!PageReserved(page))
continue;
if (addr >= _text &&
addr < _etext)
codepages++;
else if (addr >= __init_begin &&
addr < __init_end)
initpages++;
else
datapages++;
}
}
#ifndef CONFIG_SUN3
/* insert pointer tables allocated so far into the tablelist */
init_pointer_table((unsigned long)kernel_pg_dir);
for (i = 0; i < PTRS_PER_PGD; i++) {
if (pgd_present(kernel_pg_dir[i]))
init_pointer_table(__pgd_page(kernel_pg_dir[i]));
}
/* insert also pointer table that we used to unmap the zero page */
if (zero_pgtable)
init_pointer_table((unsigned long)zero_pgtable);
#endif
printk("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init)\n",
nr_free_pages() << (PAGE_SHIFT-10),
totalram_pages << (PAGE_SHIFT-10),
codepages << (PAGE_SHIFT-10),
datapages << (PAGE_SHIFT-10),
initpages << (PAGE_SHIFT-10));
}
#ifdef CONFIG_BLK_DEV_INITRD
void free_initrd_mem(unsigned long start, unsigned long end)
{
int pages = 0;
for (; start < end; start += PAGE_SIZE) {
ClearPageReserved(virt_to_page(start));
init_page_count(virt_to_page(start));
free_page(start);
totalram_pages++;
pages++;
}
printk ("Freeing initrd memory: %dk freed\n", pages);
}
#endif
#ifdef CONFIG_MMU
#include "kmap_mm.c"
#else
#include "kmap_no.c"
#endif
/*
* linux/arch/m68k/mm/kmap.c
*
* Copyright (C) 1997 Roman Hodek
*
* 10/01/99 cleaned up the code and changed to the same interface
* used by other architectures /Roman Zippel
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>
#undef DEBUG
#define PTRTREESIZE (256*1024)
/*
* For 040/060 we can use the virtual memory area like other architectures,
* but for 020/030 we want to use early termination page descriptor and we
* can't mix this with normal page descriptors, so we have to copy that code
* (mm/vmalloc.c) and return appropriately aligned addresses.
*/
#ifdef CPU_M68040_OR_M68060_ONLY
#define IO_SIZE PAGE_SIZE
static inline struct vm_struct *get_io_area(unsigned long size)
{
return get_vm_area(size, VM_IOREMAP);
}
static inline void free_io_area(void *addr)
{
vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
#else
#define IO_SIZE (256*1024)
static struct vm_struct *iolist;
static struct vm_struct *get_io_area(unsigned long size)
{
unsigned long addr;
struct vm_struct **p, *tmp, *area;
area = kmalloc(sizeof(*area), GFP_KERNEL);
if (!area)
return NULL;
addr = KMAP_START;
for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
if (size + addr < (unsigned long)tmp->addr)
break;
if (addr > KMAP_END-size) {
kfree(area);
return NULL;
}
addr = tmp->size + (unsigned long)tmp->addr;
}
area->addr = (void *)addr;
area->size = size + IO_SIZE;
area->next = *p;
*p = area;
return area;
}
static inline void free_io_area(void *addr)
{
struct vm_struct **p, *tmp;
if (!addr)
return;
addr = (void *)((unsigned long)addr & -IO_SIZE);
for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
__iounmap(tmp->addr, tmp->size);
kfree(tmp);
return;
}
}
}
#endif
/*
* Map some physical address range into the kernel address space.
*/
/* Rewritten by Andreas Schwab to remove all races. */
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
struct vm_struct *area;
unsigned long virtaddr, retaddr;
long offset;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
/*
* Don't allow mappings that wrap..
*/
if (!size || physaddr > (unsigned long)(-size))
return NULL;
#ifdef CONFIG_AMIGA
if (MACH_IS_AMIGA) {
if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
&& (cacheflag == IOMAP_NOCACHE_SER))
return (void __iomem *)physaddr;
}
#endif
#ifdef DEBUG
printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
/*
* Mappings have to be aligned
*/
offset = physaddr & (IO_SIZE - 1);
physaddr &= -IO_SIZE;
size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
/*
* Ok, go for it..
*/
area = get_io_area(size);
if (!area)
return NULL;
virtaddr = (unsigned long)area->addr;
retaddr = virtaddr + offset;
#ifdef DEBUG
printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif
/*
* add cache and table flags to physical address
*/
if (CPU_IS_040_OR_060) {
physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
_PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
case IOMAP_FULL_CACHING:
physaddr |= _PAGE_CACHE040;
break;
case IOMAP_NOCACHE_SER:
default:
physaddr |= _PAGE_NOCACHE_S;
break;
case IOMAP_NOCACHE_NONSER:
physaddr |= _PAGE_NOCACHE;
break;
case IOMAP_WRITETHROUGH:
physaddr |= _PAGE_CACHE040W;
break;
}
} else {
physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
default:
physaddr |= _PAGE_NOCACHE030;
break;
case IOMAP_FULL_CACHING:
case IOMAP_WRITETHROUGH:
break;
}
}
while ((long)size > 0) {
#ifdef DEBUG
if (!(virtaddr & (PTRTREESIZE-1)))
printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
if (!pmd_dir) {
printk("ioremap: no mem for pmd_dir\n");
return NULL;
}
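/*
 * 020/030: write an early termination descriptor straight into the
 * pointer table slot, mapping a whole 256 KB chunk at once.
 */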
if (CPU_IS_020_OR_030) {
pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
physaddr += PTRTREESIZE;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
} else {
pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
if (!pte_dir) {
printk("ioremap: no mem for pte_dir\n");
return NULL;
}
pte_val(*pte_dir) = physaddr;
virtaddr += PAGE_SIZE;
physaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
#ifdef DEBUG
printk("\n");
#endif
flush_tlb_all();
return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
/*
* Unmap a ioremap()ed region again
*/
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
if ((!MACH_IS_AMIGA) ||
(((unsigned long)addr < 0x40000000) ||
((unsigned long)addr > 0x60000000)))
free_io_area((__force void *)addr);
#else
free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);
/*
* __iounmap unmaps nearly everything, so be careful.
* Currently it doesn't free pointer/page tables anymore, but that
* wasn't used anyway and might be added later.
*/
void __iounmap(void *addr, unsigned long size)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
if (pmd_type == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = 0;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
} else if (pmd_type == 0)
continue;
}
if (pmd_bad(*pmd_dir)) {
printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = 0;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
*/
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
if (CPU_IS_040_OR_060) {
switch (cmode) {
case IOMAP_FULL_CACHING:
cmode = _PAGE_CACHE040;
break;
case IOMAP_NOCACHE_SER:
default:
cmode = _PAGE_NOCACHE_S;
break;
case IOMAP_NOCACHE_NONSER:
cmode = _PAGE_NOCACHE;
break;
case IOMAP_WRITETHROUGH:
cmode = _PAGE_CACHE040W;
break;
}
} else {
switch (cmode) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
default:
cmode = _PAGE_NOCACHE030;
break;
case IOMAP_FULL_CACHING:
case IOMAP_WRITETHROUGH:
cmode = 0;
}
}
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
_CACHEMASK040) | cmode;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
}
}
if (pmd_bad(*pmd_dir)) {
printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
/*
* linux/arch/m68k/mm/kmap.c
*
* Copyright (C) 1997 Roman Hodek
*
* 10/01/99 cleaned up the code and changed to the same interface
* used by other architectures /Roman Zippel
*/
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <asm/setup.h>
#include <asm/segment.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/system.h>
#undef DEBUG
#define PTRTREESIZE (256*1024)
/*
* For 040/060 we can use the virtual memory area like other architectures,
* but for 020/030 we want to use early termination page descriptor and we
* can't mix this with normal page descriptors, so we have to copy that code
* (mm/vmalloc.c) and return appropriately aligned addresses.
*/
#ifdef CPU_M68040_OR_M68060_ONLY
#define IO_SIZE PAGE_SIZE
static inline struct vm_struct *get_io_area(unsigned long size)
{
return get_vm_area(size, VM_IOREMAP);
}
static inline void free_io_area(void *addr)
{
vfree((void *)(PAGE_MASK & (unsigned long)addr));
}
#else
#define IO_SIZE (256*1024)
static struct vm_struct *iolist;
static struct vm_struct *get_io_area(unsigned long size)
{
unsigned long addr;
struct vm_struct **p, *tmp, *area;
area = kmalloc(sizeof(*area), GFP_KERNEL);
if (!area)
return NULL;
addr = KMAP_START;
for (p = &iolist; (tmp = *p) ; p = &tmp->next) {
if (size + addr < (unsigned long)tmp->addr)
break;
if (addr > KMAP_END-size) {
kfree(area);
return NULL;
}
addr = tmp->size + (unsigned long)tmp->addr;
}
area->addr = (void *)addr;
area->size = size + IO_SIZE;
area->next = *p;
*p = area;
return area;
}
static inline void free_io_area(void *addr)
{
struct vm_struct **p, *tmp;
if (!addr)
return;
addr = (void *)((unsigned long)addr & -IO_SIZE);
for (p = &iolist ; (tmp = *p) ; p = &tmp->next) {
if (tmp->addr == addr) {
*p = tmp->next;
__iounmap(tmp->addr, tmp->size);
kfree(tmp);
return;
}
}
}
#endif
/*
* Map some physical address range into the kernel address space.
*/
/* Rewritten by Andreas Schwab to remove all races. */
void __iomem *__ioremap(unsigned long physaddr, unsigned long size, int cacheflag)
{
struct vm_struct *area;
unsigned long virtaddr, retaddr;
long offset;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
/*
* Don't allow mappings that wrap..
*/
if (!size || physaddr > (unsigned long)(-size))
return NULL;
#ifdef CONFIG_AMIGA
if (MACH_IS_AMIGA) {
if ((physaddr >= 0x40000000) && (physaddr + size < 0x60000000)
&& (cacheflag == IOMAP_NOCACHE_SER))
return (void __iomem *)physaddr;
}
#endif
#ifdef DEBUG
printk("ioremap: 0x%lx,0x%lx(%d) - ", physaddr, size, cacheflag);
#endif
/*
* Mappings have to be aligned
*/
offset = physaddr & (IO_SIZE - 1);
physaddr &= -IO_SIZE;
size = (size + offset + IO_SIZE - 1) & -IO_SIZE;
/*
* Ok, go for it..
*/
area = get_io_area(size);
if (!area)
return NULL;
virtaddr = (unsigned long)area->addr;
retaddr = virtaddr + offset;
#ifdef DEBUG
printk("0x%lx,0x%lx,0x%lx", physaddr, virtaddr, retaddr);
#endif
/*
* add cache and table flags to physical address
*/
if (CPU_IS_040_OR_060) {
physaddr |= (_PAGE_PRESENT | _PAGE_GLOBAL040 |
_PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
case IOMAP_FULL_CACHING:
physaddr |= _PAGE_CACHE040;
break;
case IOMAP_NOCACHE_SER:
default:
physaddr |= _PAGE_NOCACHE_S;
break;
case IOMAP_NOCACHE_NONSER:
physaddr |= _PAGE_NOCACHE;
break;
case IOMAP_WRITETHROUGH:
physaddr |= _PAGE_CACHE040W;
break;
}
} else {
physaddr |= (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY);
switch (cacheflag) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
default:
physaddr |= _PAGE_NOCACHE030;
break;
case IOMAP_FULL_CACHING:
case IOMAP_WRITETHROUGH:
break;
}
}
while ((long)size > 0) {
#ifdef DEBUG
if (!(virtaddr & (PTRTREESIZE-1)))
printk ("\npa=%#lx va=%#lx ", physaddr, virtaddr);
#endif
pgd_dir = pgd_offset_k(virtaddr);
pmd_dir = pmd_alloc(&init_mm, pgd_dir, virtaddr);
if (!pmd_dir) {
printk("ioremap: no mem for pmd_dir\n");
return NULL;
}
if (CPU_IS_020_OR_030) {
pmd_dir->pmd[(virtaddr/PTRTREESIZE) & 15] = physaddr;
physaddr += PTRTREESIZE;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
} else {
pte_dir = pte_alloc_kernel(pmd_dir, virtaddr);
if (!pte_dir) {
printk("ioremap: no mem for pte_dir\n");
return NULL;
}
pte_val(*pte_dir) = physaddr;
virtaddr += PAGE_SIZE;
physaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
}
#ifdef DEBUG
printk("\n");
#endif
flush_tlb_all();
return (void __iomem *)retaddr;
}
EXPORT_SYMBOL(__ioremap);
/*
* Unmap a ioremap()ed region again
*/
void iounmap(void __iomem *addr)
{
#ifdef CONFIG_AMIGA
if ((!MACH_IS_AMIGA) ||
(((unsigned long)addr < 0x40000000) ||
((unsigned long)addr > 0x60000000)))
free_io_area((__force void *)addr);
#else
free_io_area((__force void *)addr);
#endif
}
EXPORT_SYMBOL(iounmap);
/*
* __iounmap unmaps nearly everything, so be careful.
* Currently it doesn't free pointer/page tables anymore, but that
* wasn't used anyway and might be added later.
*/
void __iounmap(void *addr, unsigned long size)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iounmap: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
int pmd_type = pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK;
if (pmd_type == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = 0;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
} else if (pmd_type == 0)
continue;
}
if (pmd_bad(*pmd_dir)) {
printk("iounmap: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = 0;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
/*
* Set new cache mode for some kernel address space.
* The caller must push data for that range itself, if such data may already
* be in the cache.
*/
void kernel_set_cachemode(void *addr, unsigned long size, int cmode)
{
unsigned long virtaddr = (unsigned long)addr;
pgd_t *pgd_dir;
pmd_t *pmd_dir;
pte_t *pte_dir;
if (CPU_IS_040_OR_060) {
switch (cmode) {
case IOMAP_FULL_CACHING:
cmode = _PAGE_CACHE040;
break;
case IOMAP_NOCACHE_SER:
default:
cmode = _PAGE_NOCACHE_S;
break;
case IOMAP_NOCACHE_NONSER:
cmode = _PAGE_NOCACHE;
break;
case IOMAP_WRITETHROUGH:
cmode = _PAGE_CACHE040W;
break;
}
} else {
switch (cmode) {
case IOMAP_NOCACHE_SER:
case IOMAP_NOCACHE_NONSER:
default:
cmode = _PAGE_NOCACHE030;
break;
case IOMAP_FULL_CACHING:
case IOMAP_WRITETHROUGH:
cmode = 0;
}
}
while ((long)size > 0) {
pgd_dir = pgd_offset_k(virtaddr);
if (pgd_bad(*pgd_dir)) {
printk("iocachemode: bad pgd(%08lx)\n", pgd_val(*pgd_dir));
pgd_clear(pgd_dir);
return;
}
pmd_dir = pmd_offset(pgd_dir, virtaddr);
if (CPU_IS_020_OR_030) {
int pmd_off = (virtaddr/PTRTREESIZE) & 15;
if ((pmd_dir->pmd[pmd_off] & _DESCTYPE_MASK) == _PAGE_PRESENT) {
pmd_dir->pmd[pmd_off] = (pmd_dir->pmd[pmd_off] &
_CACHEMASK040) | cmode;
virtaddr += PTRTREESIZE;
size -= PTRTREESIZE;
continue;
}
}
if (pmd_bad(*pmd_dir)) {
printk("iocachemode: bad pmd (%08lx)\n", pmd_val(*pmd_dir));
pmd_clear(pmd_dir);
return;
}
pte_dir = pte_offset_kernel(pmd_dir, virtaddr);
pte_val(*pte_dir) = (pte_val(*pte_dir) & _CACHEMASK040) | cmode;
virtaddr += PAGE_SIZE;
size -= PAGE_SIZE;
}
flush_tlb_all();
}
EXPORT_SYMBOL(kernel_set_cachemode);
menu "Kernel hacking"
source "lib/Kconfig.debug"
config FULLDEBUG
bool "Full Symbolic/Source Debugging support"
help
Enable debugging symbols on kernel build.
config HIGHPROFILE
bool "Use fast second timer for profiling"
depends on COLDFIRE
help
Use a fast secondary clock to produce profiling information.
config BOOTPARAM
bool 'Compiled-in Kernel Boot Parameter'
config BOOTPARAM_STRING
string 'Kernel Boot Parameter'
default 'console=ttyS0,19200'
depends on BOOTPARAM
config NO_KERNEL_MSG
bool "Suppress Kernel BUG Messages"
help
Do not output any debug BUG messages within the kernel.
config BDM_DISABLE
bool "Disable BDM signals"
depends on (EXPERIMENTAL && COLDFIRE)
help
Disable the ColdFire CPU's BDM signals.
endmenu
CONFIG_EXPERIMENTAL=y
CONFIG_LOG_BUF_SHIFT=14
# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set
CONFIG_EXPERT=y
# CONFIG_KALLSYMS is not set
# CONFIG_HOTPLUG is not set
# CONFIG_FUTEX is not set
# CONFIG_EPOLL is not set
# CONFIG_SIGNALFD is not set
# CONFIG_TIMERFD is not set
# CONFIG_EVENTFD is not set
# CONFIG_AIO is not set
# CONFIG_VM_EVENT_COUNTERS is not set
# CONFIG_COMPAT_BRK is not set
# CONFIG_BLK_DEV_BSG is not set
# CONFIG_IOSCHED_DEADLINE is not set
# CONFIG_IOSCHED_CFQ is not set
CONFIG_M520x=y
CONFIG_CLOCK_SET=y
CONFIG_CLOCK_FREQ=166666666
CONFIG_CLOCK_DIV=2
CONFIG_M5208EVB=y
# CONFIG_4KSTACKS is not set
CONFIG_RAMBASE=0x40000000
CONFIG_RAMSIZE=0x2000000
CONFIG_VECTORBASE=0x40000000
CONFIG_KERNELBASE=0x40020000
CONFIG_RAM16BIT=y
CONFIG_BINFMT_FLAT=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
CONFIG_INET=y
# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
# CONFIG_INET_XFRM_MODE_TUNNEL is not set
# CONFIG_INET_XFRM_MODE_BEET is not set
# CONFIG_INET_LRO is not set
# CONFIG_INET_DIAG is not set
# CONFIG_IPV6 is not set
CONFIG_MTD=y
CONFIG_MTD_PARTITIONS=y
CONFIG_MTD_CHAR=y
CONFIG_MTD_BLOCK=y
CONFIG_MTD_RAM=y
CONFIG_MTD_UCLINUX=y
CONFIG_BLK_DEV_RAM=y
# CONFIG_MISC_DEVICES is not set
CONFIG_NETDEVICES=y
CONFIG_NET_ETHERNET=y
CONFIG_FEC=y
# CONFIG_NETDEV_1000 is not set
# CONFIG_NETDEV_10000 is not set
# CONFIG_INPUT is not set
# CONFIG_SERIO is not set
# CONFIG_VT is not set
CONFIG_SERIAL_MCF=y
CONFIG_SERIAL_MCF_BAUDRATE=115200
CONFIG_SERIAL_MCF_CONSOLE=y
# CONFIG_UNIX98_PTYS is not set
# CONFIG_HW_RANDOM is not set
# CONFIG_HWMON is not set
# CONFIG_USB_SUPPORT is not set
CONFIG_EXT2_FS=y
# CONFIG_FILE_LOCKING is not set
# CONFIG_DNOTIFY is not set
# CONFIG_SYSFS is not set
CONFIG_ROMFS_FS=y
CONFIG_ROMFS_BACKED_BY_MTD=y
# CONFIG_NETWORK_FILESYSTEMS is not set
# CONFIG_RCU_CPU_STALL_DETECTOR is not set
CONFIG_SYSCTL_SYSCALL_CHECK=y
CONFIG_FULLDEBUG=y
CONFIG_BOOTPARAM=y
CONFIG_BOOTPARAM_STRING="root=/dev/mtdblock0"
/* ashldi3.c extracted from gcc-2.95.2/libgcc2.c which is: */
/* Copyright (C) 1989, 92-98, 1999 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));
typedef int word_type __attribute__ ((mode (__word__)));
struct DIstruct {SItype high, low;};
typedef union
{
struct DIstruct s;
DItype ll;
} DIunion;
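/*
 * 64-bit left shift via two 32-bit words: for shifts of 32 or more the
 * low word moves entirely into the high word; otherwise both words are
 * shifted and the bits crossing the word boundary carry into the high word.
 */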
DItype
__ashldi3 (DItype u, word_type b)
{
DIunion w;
word_type bm;
DIunion uu;
if (b == 0)
return u;
uu.ll = u;
bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
if (bm <= 0)
{
w.s.low = 0;
w.s.high = (USItype)uu.s.low << -bm;
}
else
{
USItype carries = (USItype)uu.s.low >> bm;
w.s.low = (USItype)uu.s.low << b;
w.s.high = ((USItype)uu.s.high << b) | carries;
}
return w.ll;
}
/* lshrdi3.c extracted from gcc-2.7.2/libgcc2.c which is: */
/* Copyright (C) 1989, 1992, 1993, 1994, 1995 Free Software Foundation, Inc.
This file is part of GNU CC.
GNU CC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2, or (at your option)
any later version.
GNU CC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with GNU CC; see the file COPYING. If not, write to
the Free Software Foundation, 59 Temple Place - Suite 330,
Boston, MA 02111-1307, USA. */
#define BITS_PER_UNIT 8
typedef int SItype __attribute__ ((mode (SI)));
typedef unsigned int USItype __attribute__ ((mode (SI)));
typedef int DItype __attribute__ ((mode (DI)));
typedef int word_type __attribute__ ((mode (__word__)));
struct DIstruct {SItype high, low;};
typedef union
{
struct DIstruct s;
DItype ll;
} DIunion;
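/*
 * 64-bit logical right shift: for shifts of 32 or more the high word
 * moves entirely into the low word (high becomes zero); otherwise both
 * words are shifted and the crossing bits carry into the low word.
 */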
DItype
__lshrdi3 (DItype u, word_type b)
{
DIunion w;
word_type bm;
DIunion uu;
if (b == 0)
return u;
uu.ll = u;
bm = (sizeof (SItype) * BITS_PER_UNIT) - b;
if (bm <= 0)
{
w.s.high = 0;
w.s.low = (USItype)uu.s.high >> -bm;
}
else
{
USItype carries = (USItype)uu.s.high << bm;
w.s.high = (USItype)uu.s.high >> b;
w.s.low = ((USItype)uu.s.low >> b) | carries;
}
return w.ll;
}