Commit 3c0cb7c3 authored by Linus Torvalds

Merge branch 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm

* 'devel' of master.kernel.org:/home/rmk/linux-2.6-arm: (416 commits)
  ARM: DMA: add support for DMA debugging
  ARM: PL011: add DMA burst threshold support for ST variants
  ARM: PL011: Add support for transmit DMA
  ARM: PL011: Ensure IRQs are disabled in UART interrupt handler
  ARM: PL011: Separate hardware FIFO size from TTY FIFO size
  ARM: PL011: Allow better handling of vendor data
  ARM: PL011: Ensure error flags are clear at startup
  ARM: PL011: include revision number in boot-time port printk
  ARM: vexpress: add sched_clock() for Versatile Express
  ARM i.MX53: Make MX53 EVK bootable
  ARM i.MX53: Some bug fix about MX53 MSL code
  ARM: 6607/1: sa1100: Update platform device registration
  ARM: 6606/1: sa1100: Fix platform device registration
  ARM i.MX51: rename IPU irqs
  ARM i.MX51: Add ipu clock support
  ARM: imx/mx27_3ds: Add PMIC support
  ARM: DMA: Replace page_to_dma()/dma_to_page() with pfn_to_dma()/dma_to_pfn()
  mx51: fix usb clock support
  MX51: Add support for usb host 2
  arch/arm/plat-mxc/ehci.c: fix errors/typos
  ...
parents f70f5b9d 404a02cb
......@@ -34,3 +34,5 @@ memory.txt
- description of the virtual memory layout
nwfpe/
- NWFPE floating point emulator documentation
swp_emulation
- SWP/SWPB emulation handler/logging description
Software emulation of deprecated SWP instruction (CONFIG_SWP_EMULATE)
---------------------------------------------------------------------
ARMv6 architecture deprecates use of the SWP/SWPB instructions, and recommends
moving to the load-locked/store-conditional instructions LDREX and STREX.
ARMv7 multiprocessing extensions introduce the ability to disable these
instructions, triggering an undefined instruction exception when executed.
Trapped instructions are emulated using an LDREX/STREX or LDREXB/STREXB
sequence. If a memory access fault (an abort) occurs, a segmentation fault is
signalled to the triggering process.
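The trapped instruction is replayed as a load-exclusive/store-exclusive retry
loop. As a rough illustration, here is a minimal C sketch of the word-sized
case using GCC inline assembly (a hypothetical helper, not the kernel's actual
swp_emulate.c; the real handler must additionally cope with the user address
faulting mid-sequence):

/* Emulate "SWP Rd, Rm, [Rn]" (word case) using LDREX/STREX. */
static inline unsigned int emulate_swp_word(unsigned int *addr,
					    unsigned int new)
{
	unsigned int old, failed;

	do {
		__asm__ __volatile__(
		"	ldrex	%0, [%3]\n"	/* old = *addr (exclusive) */
		"	strex	%1, %2, [%3]\n"	/* *addr = new; %1 = 0 on success */
		: "=&r" (old), "=&r" (failed)
		: "r" (new), "r" (addr)
		: "memory");
	} while (failed);	/* lost exclusivity: retry the whole pair */

	return old;		/* SWP yields the previous memory value */
}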
/proc/cpu/swp_emulation holds some statistics/information, including the PID of
the last process to trigger the emulation. For example:
---
Emulated SWP: 12
Emulated SWPB: 0
Aborted SWP{B}: 1
Last process: 314
---
NOTE: when accessing uncached shared regions, LDREX/STREX rely on an external
transaction monitoring block called a global monitor to maintain update
atomicity. If your system does not implement a global monitor, this option can
cause programs that perform SWP operations to uncached memory to deadlock, as
the STREX operation will always fail.
......@@ -23,7 +23,7 @@ config STRICT_DEVMEM
config FRAME_POINTER
bool
depends on !THUMB2_KERNEL
default y if !ARM_UNWIND
default y if !ARM_UNWIND || FUNCTION_GRAPH_TRACER
help
If you say N here, the resulting kernel will be slightly smaller and
faster. However, if neither FRAME_POINTER nor ARM_UNWIND are enabled,
......@@ -31,7 +31,7 @@ config FRAME_POINTER
reported is severely limited.
config ARM_UNWIND
bool "Enable stack unwinding support"
bool "Enable stack unwinding support (EXPERIMENTAL)"
depends on AEABI && EXPERIMENTAL
default y
help
......
......@@ -154,10 +154,11 @@ machine-$(CONFIG_ARCH_MSM) := msm
machine-$(CONFIG_ARCH_MV78XX0) := mv78xx0
machine-$(CONFIG_ARCH_MX1) := imx
machine-$(CONFIG_ARCH_MX2) := imx
machine-$(CONFIG_ARCH_MX25) := mx25
machine-$(CONFIG_ARCH_MX25) := imx
machine-$(CONFIG_ARCH_MX3) := mx3
machine-$(CONFIG_ARCH_MX5) := mx5
machine-$(CONFIG_ARCH_MXC91231) := mxc91231
machine-$(CONFIG_ARCH_MXS) := mxs
machine-$(CONFIG_ARCH_NETX) := netx
machine-$(CONFIG_ARCH_NOMADIK) := nomadik
machine-$(CONFIG_ARCH_NS9XXX) := ns9xxx
......
......@@ -45,6 +45,10 @@ else
endif
endif
ifeq ($(CONFIG_ARCH_SHMOBILE),y)
OBJS += head-shmobile.o
endif
#
# We now have a PIC decompressor implementation. Decompressors running
# from RAM should not define ZTEXTADDR. Decompressors running directly
......
/*
* The head-file for SH-Mobile ARM platforms
*
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
* Simon Horman <horms@verge.net.au>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
* the Free Software Foundation; version 2 of the License.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifdef CONFIG_ZBOOT_ROM
.section ".start", "ax"
/* load board-specific initialization code */
#include <mach/zboot.h>
b 1f
__atags:@ tag #1
.long 12 @ tag->hdr.size = tag_size(tag_core);
.long 0x54410001 @ tag->hdr.tag = ATAG_CORE;
.long 0 @ tag->u.core.flags = 0;
.long 0 @ tag->u.core.pagesize = 0;
.long 0 @ tag->u.core.rootdev = 0;
@ tag #2
.long 8 @ tag->hdr.size = tag_size(tag_mem32);
.long 0x54410002 @ tag->hdr.tag = ATAG_MEM;
.long CONFIG_MEMORY_SIZE @ tag->u.mem.size = CONFIG_MEMORY_SIZE;
.long CONFIG_MEMORY_START @ tag->u.mem.start = CONFIG_MEMORY_START;
@ tag #3
.long 0 @ tag->hdr.size = 0
.long 0 @ tag->hdr.tag = ATAG_NONE;
1:
/* Set board ID necessary for boot */
ldr r7, 1f @ Set machine type register
adr r8, __atags @ Set atag register
b 2f
1 : .long MACH_TYPE
2 :
#endif /* CONFIG_ZBOOT_ROM */
......@@ -37,7 +37,3 @@ config SHARP_PARAM
config SHARP_SCOOP
bool
config COMMON_CLKDEV
bool
select HAVE_CLK
......@@ -17,3 +17,4 @@ obj-$(CONFIG_ARCH_IXP2000) += uengine.o
obj-$(CONFIG_ARCH_IXP23XX) += uengine.o
obj-$(CONFIG_PCI_HOST_ITE8152) += it8152.o
obj-$(CONFIG_COMMON_CLKDEV) += clkdev.o
obj-$(CONFIG_ARM_TIMER_SP804) += timer-sp.o
......@@ -328,7 +328,7 @@ static inline void unmap_single(struct device *dev, dma_addr_t dma_addr,
* substitute the safe buffer for the unsafe one.
* (basically move the buffer from an unsafe area to a safe one)
*/
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
dma_addr_t __dma_map_single(struct device *dev, void *ptr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
......@@ -338,7 +338,7 @@ dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
return map_single(dev, ptr, size, dir);
}
EXPORT_SYMBOL(dma_map_single);
EXPORT_SYMBOL(__dma_map_single);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
......@@ -346,7 +346,7 @@ EXPORT_SYMBOL(dma_map_single);
* the safe buffer. (basically return things back to the way they
* should be)
*/
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
void __dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
......@@ -354,9 +354,9 @@ void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_single);
EXPORT_SYMBOL(__dma_unmap_single);
dma_addr_t dma_map_page(struct device *dev, struct page *page,
dma_addr_t __dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
......@@ -372,7 +372,7 @@ dma_addr_t dma_map_page(struct device *dev, struct page *page,
return map_single(dev, page_address(page) + offset, size, dir);
}
EXPORT_SYMBOL(dma_map_page);
EXPORT_SYMBOL(__dma_map_page);
/*
* see if a mapped address was really a "safe" buffer and if so, copy
......@@ -380,7 +380,7 @@ EXPORT_SYMBOL(dma_map_page);
* the safe buffer. (basically return things back to the way they
* should be)
*/
void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
void __dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
enum dma_data_direction dir)
{
dev_dbg(dev, "%s(ptr=%p,size=%d,dir=%x)\n",
......@@ -388,7 +388,7 @@ void dma_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
unmap_single(dev, dma_addr, size, dir);
}
EXPORT_SYMBOL(dma_unmap_page);
EXPORT_SYMBOL(__dma_unmap_page);
int dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
unsigned long off, size_t sz, enum dma_data_direction dir)
......
......@@ -35,6 +35,9 @@
static DEFINE_SPINLOCK(irq_controller_lock);
/* Address of GIC 0 CPU interface */
void __iomem *gic_cpu_base_addr __read_mostly;
struct gic_chip_data {
unsigned int irq_offset;
void __iomem *dist_base;
......@@ -45,7 +48,7 @@ struct gic_chip_data {
#define MAX_GIC_NR 1
#endif
static struct gic_chip_data gic_data[MAX_GIC_NR];
static struct gic_chip_data gic_data[MAX_GIC_NR] __read_mostly;
static inline void __iomem *gic_dist_base(unsigned int irq)
{
......@@ -213,21 +216,16 @@ void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
set_irq_chained_handler(irq, gic_handle_cascade_irq);
}
void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
static void __init gic_dist_init(struct gic_chip_data *gic,
unsigned int irq_start)
{
unsigned int gic_irqs, irq_limit, i;
void __iomem *base = gic->dist_base;
u32 cpumask = 1 << smp_processor_id();
if (gic_nr >= MAX_GIC_NR)
BUG();
cpumask |= cpumask << 8;
cpumask |= cpumask << 16;
gic_data[gic_nr].dist_base = base;
gic_data[gic_nr].irq_offset = (irq_start - 1) & ~31;
writel(0, base + GIC_DIST_CTRL);
/*
......@@ -267,7 +265,7 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
/*
* Limit number of interrupts registered to the platform maximum
*/
irq_limit = gic_data[gic_nr].irq_offset + gic_irqs;
irq_limit = gic->irq_offset + gic_irqs;
if (WARN_ON(irq_limit > NR_IRQS))
irq_limit = NR_IRQS;
......@@ -276,7 +274,7 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
*/
for (i = irq_start; i < irq_limit; i++) {
set_irq_chip(i, &gic_chip);
set_irq_chip_data(i, &gic_data[gic_nr]);
set_irq_chip_data(i, gic);
set_irq_handler(i, handle_level_irq);
set_irq_flags(i, IRQF_VALID | IRQF_PROBE);
}
......@@ -284,19 +282,12 @@ void __init gic_dist_init(unsigned int gic_nr, void __iomem *base,
writel(1, base + GIC_DIST_CTRL);
}
void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
static void __cpuinit gic_cpu_init(struct gic_chip_data *gic)
{
void __iomem *dist_base;
void __iomem *dist_base = gic->dist_base;
void __iomem *base = gic->cpu_base;
int i;
if (gic_nr >= MAX_GIC_NR)
BUG();
dist_base = gic_data[gic_nr].dist_base;
BUG_ON(!dist_base);
gic_data[gic_nr].cpu_base = base;
/*
* Deal with the banked PPI and SGI interrupts - disable all
* PPI interrupts, ensure all SGI interrupts are enabled.
......@@ -314,6 +305,42 @@ void __cpuinit gic_cpu_init(unsigned int gic_nr, void __iomem *base)
writel(1, base + GIC_CPU_CTRL);
}
void __init gic_init(unsigned int gic_nr, unsigned int irq_start,
void __iomem *dist_base, void __iomem *cpu_base)
{
struct gic_chip_data *gic;
BUG_ON(gic_nr >= MAX_GIC_NR);
gic = &gic_data[gic_nr];
gic->dist_base = dist_base;
gic->cpu_base = cpu_base;
gic->irq_offset = (irq_start - 1) & ~31;
if (gic_nr == 0)
gic_cpu_base_addr = cpu_base;
gic_dist_init(gic, irq_start);
gic_cpu_init(gic);
}
void __cpuinit gic_secondary_init(unsigned int gic_nr)
{
BUG_ON(gic_nr >= MAX_GIC_NR);
gic_cpu_init(&gic_data[gic_nr]);
}
void __cpuinit gic_enable_ppi(unsigned int irq)
{
unsigned long flags;
local_irq_save(flags);
irq_to_desc(irq)->status |= IRQ_NOPROBE;
gic_unmask_irq(irq);
local_irq_restore(flags);
}
#ifdef CONFIG_SMP
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq)
{
......
/*
* linux/arch/arm/plat-versatile/timer-sp.c
* linux/arch/arm/common/timer-sp.c
*
* Copyright (C) 1999 - 2003 ARM Limited
* Copyright (C) 2000 Deep Blue Solutions Ltd
......@@ -26,8 +26,6 @@
#include <asm/hardware/arm_timer.h>
#include <plat/timer-sp.h>
/*
* These timers are currently always setup to be clocked at 1MHz.
*/
......@@ -46,7 +44,6 @@ static struct clocksource clocksource_sp804 = {
.rating = 200,
.read = sp804_read,
.mask = CLOCKSOURCE_MASK(32),
.shift = 20,
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
......@@ -63,8 +60,7 @@ void __init sp804_clocksource_init(void __iomem *base)
writel(TIMER_CTRL_32BIT | TIMER_CTRL_ENABLE | TIMER_CTRL_PERIODIC,
clksrc_base + TIMER_CTRL);
cs->mult = clocksource_khz2mult(TIMER_FREQ_KHZ, cs->shift);
clocksource_register(cs);
clocksource_register_khz(cs, TIMER_FREQ_KHZ);
}
......
......@@ -84,6 +84,7 @@ CONFIG_SERIAL_IMX_CONSOLE=y
CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_IMX=y
CONFIG_SPI=y
CONFIG_W1=y
CONFIG_W1_MASTER_MXC=y
CONFIG_W1_SLAVE_THERM=y
......
......@@ -18,6 +18,7 @@
#endif
#include <asm/ptrace.h>
#include <asm/domain.h>
/*
* Endian independent macros for shifting bytes within registers.
......@@ -157,16 +158,24 @@
#ifdef CONFIG_SMP
#define ALT_SMP(instr...) \
9998: instr
/*
* Note: if you get assembler errors from ALT_UP() when building with
* CONFIG_THUMB2_KERNEL, you almost certainly need to use
* ALT_SMP( W(instr) ... )
*/
#define ALT_UP(instr...) \
.pushsection ".alt.smp.init", "a" ;\
.long 9998b ;\
instr ;\
9997: instr ;\
.if . - 9997b != 4 ;\
.error "ALT_UP() content must assemble to exactly 4 bytes";\
.endif ;\
.popsection
#define ALT_UP_B(label) \
.equ up_b_offset, label - 9998b ;\
.pushsection ".alt.smp.init", "a" ;\
.long 9998b ;\
b . + up_b_offset ;\
W(b) . + up_b_offset ;\
.popsection
#else
#define ALT_SMP(instr...)
......@@ -177,16 +186,24 @@
/*
* SMP data memory barrier
*/
.macro smp_dmb
.macro smp_dmb mode
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
.ifeqs "\mode","arm"
ALT_SMP(dmb)
.else
ALT_SMP(W(dmb))
.endif
#elif __LINUX_ARM_ARCH__ == 6
ALT_SMP(mcr p15, 0, r0, c7, c10, 5) @ dmb
#else
#error Incompatible SMP platform
#endif
.ifeqs "\mode","arm"
ALT_UP(nop)
.else
ALT_UP(W(nop))
.endif
#endif
.endm
......@@ -206,12 +223,12 @@
*/
#ifdef CONFIG_THUMB2_KERNEL
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort
.macro usraccoff, instr, reg, ptr, inc, off, cond, abort, t=T()
9999:
.if \inc == 1
\instr\cond\()bt \reg, [\ptr, #\off]
\instr\cond\()b\()\t\().w \reg, [\ptr, #\off]
.elseif \inc == 4
\instr\cond\()t \reg, [\ptr, #\off]
\instr\cond\()\t\().w \reg, [\ptr, #\off]
.else
.error "Unsupported inc macro argument"
.endif
......@@ -246,13 +263,13 @@
#else /* !CONFIG_THUMB2_KERNEL */
.macro usracc, instr, reg, ptr, inc, cond, rept, abort
.macro usracc, instr, reg, ptr, inc, cond, rept, abort, t=T()
.rept \rept
9999:
.if \inc == 1
\instr\cond\()bt \reg, [\ptr], #\inc
\instr\cond\()b\()\t \reg, [\ptr], #\inc
.elseif \inc == 4
\instr\cond\()t \reg, [\ptr], #\inc
\instr\cond\()\t \reg, [\ptr], #\inc
.else
.error "Unsupported inc macro argument"
.endif
......
......@@ -23,4 +23,6 @@
#define ARCH_SLAB_MINALIGN 8
#endif
#define __read_mostly __attribute__((__section__(".data..read_mostly")))
#endif
......@@ -12,23 +12,13 @@
#ifndef __ASM_CLKDEV_H
#define __ASM_CLKDEV_H
struct clk;
struct device;
#include <linux/slab.h>
struct clk_lookup {
struct list_head node;
const char *dev_id;
const char *con_id;
struct clk *clk;
};
#include <mach/clkdev.h>
struct clk_lookup *clkdev_alloc(struct clk *clk, const char *con_id,
const char *dev_fmt, ...);
void clkdev_add(struct clk_lookup *cl);
void clkdev_drop(struct clk_lookup *cl);
void clkdev_add_table(struct clk_lookup *, size_t);
int clk_add_alias(const char *, const char *, char *, struct device *);
static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
{
return kzalloc(size, GFP_KERNEL);
}
#endif
......@@ -5,24 +5,29 @@
#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>
#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif
/*
* page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
* used internally by the DMA-mapping API to provide DMA addresses. They
* must not be used by drivers.
* dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
* functions used internally by the DMA-mapping API to provide DMA
* addresses. They must not be used by drivers.
*/
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
return (dma_addr_t)__pfn_to_bus(pfn);
}
static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
return pfn_to_page(__bus_to_pfn(addr));
return __bus_to_pfn(addr);
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
......@@ -35,14 +40,14 @@ static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
return __arch_page_to_dma(dev, page);
return __arch_pfn_to_dma(dev, pfn);
}
static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
return __arch_dma_to_page(dev, addr);
return __arch_dma_to_pfn(dev, addr);
}
static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
......@@ -293,13 +298,13 @@ extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
/*
* The DMA API, implemented by dmabounce.c. See below for descriptions.
*/
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
extern dma_addr_t __dma_map_single(struct device *, void *, size_t,
enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
extern void __dma_unmap_single(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
extern dma_addr_t __dma_map_page(struct device *, struct page *,
unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
enum dma_data_direction);
/*
......@@ -323,6 +328,34 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
}
static inline dma_addr_t __dma_map_single(struct device *dev, void *cpu_addr,
size_t size, enum dma_data_direction dir)
{
__dma_single_cpu_to_dev(cpu_addr, size, dir);
return virt_to_dma(dev, cpu_addr);
}
static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
__dma_page_cpu_to_dev(page, offset, size, dir);
return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}
static inline void __dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}
static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */
/**
* dma_map_single - map a single buffer for streaming DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
......@@ -340,11 +373,16 @@ static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
size_t size, enum dma_data_direction dir)
{
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
__dma_single_cpu_to_dev(cpu_addr, size, dir);
addr = __dma_map_single(dev, cpu_addr, size, dir);
debug_dma_map_page(dev, virt_to_page(cpu_addr),
(unsigned long)cpu_addr & ~PAGE_MASK, size,
dir, addr, true);
return virt_to_dma(dev, cpu_addr);
return addr;
}
/**
......@@ -364,11 +402,14 @@ static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction dir)
{
dma_addr_t addr;
BUG_ON(!valid_dma_direction(dir));
__dma_page_cpu_to_dev(page, offset, size, dir);
addr = __dma_map_page(dev, page, offset, size, dir);
debug_dma_map_page(dev, page, offset, size, dir, addr, false);
return page_to_dma(dev, page) + offset;
return addr;
}
/**
......@@ -388,7 +429,8 @@ static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
debug_dma_unmap_page(dev, handle, size, dir, true);
__dma_unmap_single(dev, handle, size, dir);
}
/**
......@@ -408,10 +450,9 @@ static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
size_t size, enum dma_data_direction dir)
{
__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
size, dir);
debug_dma_unmap_page(dev, handle, size, dir, false);
__dma_unmap_page(dev, handle, size, dir);
}
#endif /* CONFIG_DMABOUNCE */
/**
* dma_sync_single_range_for_cpu
......@@ -437,6 +478,8 @@ static inline void dma_sync_single_range_for_cpu(struct device *dev,
{
BUG_ON(!valid_dma_direction(dir));
debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);
if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
return;
......@@ -449,6 +492,8 @@ static inline void dma_sync_single_range_for_device(struct device *dev,
{
BUG_ON(!valid_dma_direction(dir));
debug_dma_sync_single_for_device(dev, handle + offset, size, dir);
if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
return;
......
......@@ -45,13 +45,17 @@
*/
#define DOMAIN_NOACCESS 0
#define DOMAIN_CLIENT 1
#ifdef CONFIG_CPU_USE_DOMAINS
#define DOMAIN_MANAGER 3
#else
#define DOMAIN_MANAGER 1
#endif
#define domain_val(dom,type) ((type) << (2*(dom)))
#ifndef __ASSEMBLY__
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_USE_DOMAINS
#define set_domain(x) \
do { \
__asm__ __volatile__( \
......@@ -74,5 +78,28 @@
#define modify_domain(dom,type) do { } while (0)
#endif
/*
* Generate the T (user) versions of the LDR/STR and related
* instructions (inline assembly)
*/
#ifdef CONFIG_CPU_USE_DOMAINS
#define T(instr) #instr "t"
#else
#define T(instr) #instr
#endif
#endif /* !__ASSEMBLY__ */
#else /* __ASSEMBLY__ */
/*
* Generate the T (user) versions of the LDR/STR and related
* instructions
*/
#ifdef CONFIG_CPU_USE_DOMAINS
#define T(instr) instr ## t
#else
#define T(instr) instr
#endif
#endif /* __ASSEMBLY__ */
#endif /* !__ASM_PROC_DOMAIN_H */
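As an illustration (a hedged sketch using the C-side definition above, not a
macro from the kernel itself), a uaccess-style accessor written once against
T() assembles to the unprivileged ldrt/strt forms only when domains are in
use:

/* With CONFIG_CPU_USE_DOMAINS, T(ldr) pastes to "ldrt"; otherwise it
 * stays "ldr" and user/kernel separation comes from MMU permissions.
 * The __ex_table fault fixup a real accessor needs is elided. */
#define __sketch_get_user_word(x, addr, err)		\
	__asm__ __volatile__(				\
	"1:	" T(ldr) "	%1, [%2], #0\n"		\
	"2:\n"						\
	: "+r" (err), "=&r" (x)				\
	: "r" (addr))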
......@@ -99,6 +99,8 @@ struct elf32_hdr;
extern int elf_check_arch(const struct elf32_hdr *);
#define elf_check_arch elf_check_arch
#define vmcore_elf64_check_arch(x) (0)
extern int arm_elf_read_implies_exec(const struct elf32_hdr *, int);
#define elf_read_implies_exec(ex,stk) arm_elf_read_implies_exec(&(ex), stk)
......
/*
* Interrupt handling. Preserves r7, r8, r9
*/
.macro arch_irq_handler_default
get_irqnr_preamble r5, lr
1: get_irqnr_and_base r0, r6, r5, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adrne lr, BSYM(1b)
bne asm_do_IRQ
#ifdef CONFIG_SMP
/*
* XXX
*
* this macro assumes that irqstat (r6) and base (r5) are
* preserved from get_irqnr_and_base above
*/
ALT_SMP(test_for_ipi r0, r6, r5, lr)
ALT_UP_B(9997f)
movne r1, sp
adrne lr, BSYM(1b)
bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r6, r5, lr
movne r0, sp
adrne lr, BSYM(1b)
bne do_local_timer
#endif
#endif
9997:
.endm
.macro arch_irq_handler, symbol_name
.align 5
.global \symbol_name
\symbol_name:
mov r4, lr
arch_irq_handler_default
mov pc, r4
.endm
......@@ -13,12 +13,13 @@
#include <linux/preempt.h>
#include <linux/uaccess.h>
#include <asm/errno.h>
#include <asm/domain.h>
#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
__asm__ __volatile__( \
"1: ldrt %1, [%2]\n" \
"1: " T(ldr) " %1, [%2]\n" \
" " insn "\n" \
"2: strt %0, [%2]\n" \
"2: " T(str) " %0, [%2]\n" \
" mov %0, #0\n" \
"3:\n" \
" .pushsection __ex_table,\"a\"\n" \
......@@ -97,10 +98,10 @@ futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval)
pagefault_disable(); /* implies preempt_disable() */
__asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
"1: ldrt %0, [%3]\n"
"1: " T(ldr) " %0, [%3]\n"
" teq %0, %1\n"
" it eq @ explicit IT needed for the 2b label\n"
"2: streqt %2, [%3]\n"
"2: " T(streq) " %2, [%3]\n"
"3:\n"
" .pushsection __ex_table,\"a\"\n"
" .align 3\n"
......
......@@ -5,13 +5,31 @@
#include <linux/threads.h>
#include <asm/irq.h>
#define NR_IPI 5
typedef struct {
unsigned int __softirq_pending;
#ifdef CONFIG_LOCAL_TIMERS
unsigned int local_timer_irqs;
#endif
#ifdef CONFIG_SMP
unsigned int ipi_irqs[NR_IPI];
#endif
} ____cacheline_aligned irq_cpustat_t;
#include <linux/irq_cpustat.h> /* Standard mappings for irq_cpustat_t above */
#define __inc_irq_stat(cpu, member) __IRQ_STAT(cpu, member)++
#define __get_irq_stat(cpu, member) __IRQ_STAT(cpu, member)
#ifdef CONFIG_SMP
u64 smp_irq_stat_cpu(unsigned int cpu);
#else
#define smp_irq_stat_cpu(cpu) 0
#endif
#define arch_irq_stat_cpu smp_irq_stat_cpu
#if NR_IRQS > 512
#define HARDIRQ_BITS 10
#elif NR_IRQS > 256
......
/*
* arch/arm/include/asm/hardware/entry-macro-gic.S
*
* Low-level IRQ helper macros for GIC
*
* This file is licensed under the terms of the GNU General Public
* License version 2. This program is licensed "as is" without any
* warranty of any kind, whether express or implied.
*/
#include <asm/hardware/gic.h>
#ifndef HAVE_GET_IRQNR_PREAMBLE
.macro get_irqnr_preamble, base, tmp
ldr \base, =gic_cpu_base_addr
ldr \base, [\base]
.endm
#endif
/*
* The interrupt numbering scheme is defined in the
* interrupt controller spec. To wit:
*
* Interrupts 0-15 are IPI
* 16-28 are reserved
* 29-31 are local. We allow 30 to be used for the watchdog.
* 32-1020 are global
* 1021-1022 are reserved
* 1023 is "spurious" (no interrupt)
*
* For now, we ignore all local interrupts so only return an interrupt if it's
* between 30 and 1020. The test_for_ipi routine below will pick up on IPIs.
*
* A simple read from the controller will tell us the number of the highest
* priority enabled interrupt. We then just need to check whether it is in the
* valid range for an IRQ (30-1020 inclusive).
*/
.macro get_irqnr_and_base, irqnr, irqstat, base, tmp
ldr \irqstat, [\base, #GIC_CPU_INTACK]
/* bits 12-10 = src CPU, 9-0 = int # */
ldr \tmp, =1021
bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #29
cmpcc \irqnr, \irqnr
cmpne \irqnr, \tmp
cmpcs \irqnr, \irqnr
.endm
/* We assume that irqstat (the raw value of the IRQ acknowledge
* register) is preserved from the macro above.
* If there is an IPI, we immediately signal end of interrupt on the
* controller, since this requires the original irqstat value which
* we won't easily be able to recreate later.
*/
.macro test_for_ipi, irqnr, irqstat, base, tmp
bic \irqnr, \irqstat, #0x1c00
cmp \irqnr, #16
strcc \irqstat, [\base, #GIC_CPU_EOI]
cmpcs \irqnr, \irqnr
.endm
/* As above, this assumes that irqstat and base are preserved.. */
.macro test_for_ltirq, irqnr, irqstat, base, tmp
bic \irqnr, \irqstat, #0x1c00
mov \tmp, #0
cmp \irqnr, #29
moveq \tmp, #1
streq \irqstat, [\base, #GIC_CPU_EOI]
cmp \tmp, #0
.endm
......@@ -33,10 +33,13 @@
#define GIC_DIST_SOFTINT 0xf00
#ifndef __ASSEMBLY__
void gic_dist_init(unsigned int gic_nr, void __iomem *base, unsigned int irq_start);
void gic_cpu_init(unsigned int gic_nr, void __iomem *base);
extern void __iomem *gic_cpu_base_addr;
void gic_init(unsigned int, unsigned int, void __iomem *, void __iomem *);
void gic_secondary_init(unsigned int);
void gic_cascade_irq(unsigned int gic_nr, unsigned int irq);
void gic_raise_softirq(const struct cpumask *mask, unsigned int irq);
void gic_enable_ppi(unsigned int);
#endif
#endif
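A board port now reaches all of this through the two consolidated entry
points; a hedged sketch of machine code using them (GIC_DIST_PHYS,
GIC_CPU_PHYS and IRQ_GIC_START are illustrative names, not from this merge):

/* Boot CPU: program the distributor and this CPU's interface. */
static void __init board_init_irq(void)
{
	gic_init(0, IRQ_GIC_START,
		 ioremap(GIC_DIST_PHYS, SZ_4K),
		 ioremap(GIC_CPU_PHYS, SZ_256));
}

/* Secondary CPUs only set up their own banked CPU interface. */
static void __cpuinit board_secondary_init(unsigned int cpu)
{
	gic_secondary_init(0);
}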
......@@ -20,7 +20,7 @@ struct arch_hw_breakpoint_ctrl {
struct arch_hw_breakpoint {
u32 address;
u32 trigger;
struct perf_event *suspended_wp;
struct arch_hw_breakpoint_ctrl step_ctrl;
struct arch_hw_breakpoint_ctrl ctrl;
};
......
......@@ -241,18 +241,15 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
*
*/
#ifndef __arch_ioremap
#define ioremap(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE)
#define ioremap_nocache(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE)
#define ioremap_cached(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arm_ioremap(cookie, size, MT_DEVICE_WC)
#define iounmap(cookie) __iounmap(cookie)
#else
#define __arch_ioremap __arm_ioremap
#define __arch_iounmap __iounmap
#endif
#define ioremap(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_nocache(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE)
#define ioremap_cached(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_CACHED)
#define ioremap_wc(cookie,size) __arch_ioremap((cookie), (size), MT_DEVICE_WC)
#define iounmap(cookie) __arch_iounmap(cookie)
#endif
#define iounmap __arch_iounmap
/*
* io{read,write}{8,16,32} macros
......
......@@ -33,10 +33,20 @@ static inline void crash_setup_regs(struct pt_regs *newregs,
if (oldregs) {
memcpy(newregs, oldregs, sizeof(*newregs));
} else {
__asm__ __volatile__ ("stmia %0, {r0 - r15}"
: : "r" (&newregs->ARM_r0));
__asm__ __volatile__ ("mrs %0, cpsr"
: "=r" (newregs->ARM_cpsr));
__asm__ __volatile__ (
"stmia %[regs_base], {r0-r12}\n\t"
"mov %[_ARM_sp], sp\n\t"
"str lr, %[_ARM_lr]\n\t"
"adr %[_ARM_pc], 1f\n\t"
"mrs %[_ARM_cpsr], cpsr\n\t"
"1:"
: [_ARM_pc] "=r" (newregs->ARM_pc),
[_ARM_cpsr] "=r" (newregs->ARM_cpsr),
[_ARM_sp] "=r" (newregs->ARM_sp),
[_ARM_lr] "=o" (newregs->ARM_lr)
: [regs_base] "r" (&newregs->ARM_r0)
: "memory"
);
}
}
......
......@@ -30,7 +30,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
#include "smp_twd.h"
#define local_timer_ack() twd_timer_ack()
#define local_timer_stop() twd_timer_stop()
#else
......@@ -40,11 +39,6 @@ asmlinkage void do_local_timer(struct pt_regs *);
*/
int local_timer_ack(void);
/*
* Stop a local timer interrupt.
*/
void local_timer_stop(void);
#endif
/*
......@@ -52,12 +46,6 @@ void local_timer_stop(void);
*/
void local_timer_setup(struct clock_event_device *);
#else
static inline void local_timer_stop(void)
{
}
#endif
#endif
......@@ -37,11 +37,20 @@ struct machine_desc {
struct meminfo *);
void (*reserve)(void);/* reserve mem blocks */
void (*map_io)(void);/* IO mapping function */
void (*init_early)(void);
void (*init_irq)(void);
struct sys_timer *timer; /* system tick timer */
void (*init_machine)(void);
#ifdef CONFIG_MULTI_IRQ_HANDLER
void (*handle_irq)(struct pt_regs *);
#endif
};
/*
* Current machine - only accessible during boot.
*/
extern struct machine_desc *machine_desc;
/*
* Set of macros to define architecture features. This is built into
* a table by the linker.
......
......@@ -17,10 +17,12 @@ struct seq_file;
/*
* This is internal. Do not use it.
*/
extern unsigned int arch_nr_irqs;
extern void (*init_arch_irq)(void);
extern void init_FIQ(void);
extern int show_fiq_list(struct seq_file *, void *);
extern int show_fiq_list(struct seq_file *, int);
#ifdef CONFIG_MULTI_IRQ_HANDLER
extern void (*handle_arch_irq)(struct pt_regs *);
#endif
/*
* This is for easy migration, but should be changed in the source
......
......@@ -43,7 +43,6 @@ struct sys_timer {
#endif
};
extern struct sys_timer *system_timer;
extern void timer_tick(void);
#endif
......@@ -8,11 +8,6 @@
struct unwind_table;
#ifdef CONFIG_ARM_UNWIND
struct arm_unwind_mapping {
Elf_Shdr *unw_sec;
Elf_Shdr *sec_text;
struct unwind_table *unwind;
};
enum {
ARM_SEC_INIT,
ARM_SEC_DEVINIT,
......@@ -21,13 +16,13 @@ enum {
ARM_SEC_DEVEXIT,
ARM_SEC_MAX,
};
#endif
struct mod_arch_specific {
struct arm_unwind_mapping map[ARM_SEC_MAX];
};
#else
struct mod_arch_specific {
};
#ifdef CONFIG_ARM_UNWIND
struct unwind_table *unwind[ARM_SEC_MAX];
#endif
};
/*
* Include the ARM architecture version.
......
......@@ -151,13 +151,15 @@ extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
#define clear_page(page) memset((void *)(page), 0, PAGE_SIZE)
extern void copy_page(void *to, const void *from);
typedef unsigned long pteval_t;
#undef STRICT_MM_TYPECHECKS
#ifdef STRICT_MM_TYPECHECKS
/*
* These are used to make use of C type-checking..
*/
typedef struct { unsigned long pte; } pte_t;
typedef struct { pteval_t pte; } pte_t;
typedef struct { unsigned long pmd; } pmd_t;
typedef struct { unsigned long pgd[2]; } pgd_t;
typedef struct { unsigned long pgprot; } pgprot_t;
......@@ -175,7 +177,7 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/*
* .. while these make it easier on the compiler
*/
typedef unsigned long pte_t;
typedef pteval_t pte_t;
typedef unsigned long pmd_t;
typedef unsigned long pgd_t[2];
typedef unsigned long pgprot_t;
......
......@@ -30,14 +30,16 @@
#define pmd_free(mm, pmd) do { } while (0)
#define pgd_populate(mm,pmd,pte) BUG()
extern pgd_t *get_pgd_slow(struct mm_struct *mm);
extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
#define pgd_alloc(mm) get_pgd_slow(mm)
#define pgd_free(mm, pgd) free_pgd_slow(mm, pgd)
extern pgd_t *pgd_alloc(struct mm_struct *mm);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
#define PGALLOC_GFP (GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO)
static inline void clean_pte_table(pte_t *pte)
{
clean_dcache_area(pte + PTE_HWTABLE_PTRS, PTE_HWTABLE_SIZE);
}
/*
* Allocate one PTE table.
*
......@@ -45,14 +47,14 @@ extern void free_pgd_slow(struct mm_struct *mm, pgd_t *pgd);
* into one table thus:
*
* +------------+
* | h/w pt 0 |
* +------------+
* | h/w pt 1 |
* +------------+
* | Linux pt 0 |
* +------------+
* | Linux pt 1 |
* +------------+
* | h/w pt 0 |
* +------------+
* | h/w pt 1 |
* +------------+
*/
static inline pte_t *
pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
......@@ -60,10 +62,8 @@ pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr)
pte_t *pte;
pte = (pte_t *)__get_free_page(PGALLOC_GFP);
if (pte) {
clean_dcache_area(pte, sizeof(pte_t) * PTRS_PER_PTE);
pte += PTRS_PER_PTE;
}
if (pte)
clean_pte_table(pte);
return pte;
}
......@@ -79,10 +79,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
pte = alloc_pages(PGALLOC_GFP, 0);
#endif
if (pte) {
if (!PageHighMem(pte)) {
void *page = page_address(pte);
clean_dcache_area(page, sizeof(pte_t) * PTRS_PER_PTE);
}
if (!PageHighMem(pte))
clean_pte_table(page_address(pte));
pgtable_page_ctor(pte);
}
......@@ -94,10 +92,8 @@ pte_alloc_one(struct mm_struct *mm, unsigned long addr)
*/
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
if (pte) {
pte -= PTRS_PER_PTE;
if (pte)
free_page((unsigned long)pte);
}
}
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
......@@ -106,8 +102,10 @@ static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
__free_page(pte);
}
static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
static inline void __pmd_populate(pmd_t *pmdp, phys_addr_t pte,
unsigned long prot)
{
unsigned long pmdval = (pte + PTE_HWTABLE_OFF) | prot;
pmdp[0] = __pmd(pmdval);
pmdp[1] = __pmd(pmdval + 256 * sizeof(pte_t));
flush_pmd_entry(pmdp);
......@@ -122,20 +120,16 @@ static inline void __pmd_populate(pmd_t *pmdp, unsigned long pmdval)
static inline void
pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, pte_t *ptep)
{
unsigned long pte_ptr = (unsigned long)ptep;
/*
* The pmd must be loaded with the physical
* address of the PTE table
* The pmd must be loaded with the physical address of the PTE table
*/
pte_ptr -= PTRS_PER_PTE * sizeof(void *);
__pmd_populate(pmdp, __pa(pte_ptr) | _PAGE_KERNEL_TABLE);
__pmd_populate(pmdp, __pa(ptep), _PAGE_KERNEL_TABLE);
}
static inline void
pmd_populate(struct mm_struct *mm, pmd_t *pmdp, pgtable_t ptep)
{
__pmd_populate(pmdp, page_to_pfn(ptep) << PAGE_SHIFT | _PAGE_USER_TABLE);
__pmd_populate(pmdp, page_to_phys(ptep), _PAGE_USER_TABLE);
}
#define pmd_pgtable(pmd) pmd_page(pmd)
......
/*
* sched_clock.h: support for extending counters to full 64-bit ns counter
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#ifndef ASM_SCHED_CLOCK
#define ASM_SCHED_CLOCK
#include <linux/kernel.h>
#include <linux/types.h>
struct clock_data {
u64 epoch_ns;
u32 epoch_cyc;
u32 epoch_cyc_copy;
u32 mult;
u32 shift;
};
#define DEFINE_CLOCK_DATA(name) struct clock_data name
static inline u64 cyc_to_ns(u64 cyc, u32 mult, u32 shift)
{
return (cyc * mult) >> shift;
}
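/*
 * Worked example (illustrative, assuming a 24MHz counter): one cycle is
 * 1e9/24e6 = 41.67ns, so mult = 10667, shift = 8 gives
 * cyc_to_ns(24000000, 10667, 8) = 256008000000 >> 8 = 1000031250ns,
 * i.e. one second's worth of cycles maps to ~1.00003s; init_sched_clock()
 * computes a more accurate mult/shift pair automatically.
 */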
/*
* Atomically update the sched_clock epoch. Your update callback will
* be called from a timer before the counter wraps - read the current
* counter value, and call this function to safely move the epochs
* forward. Only use this from the update callback.
*/
static inline void update_sched_clock(struct clock_data *cd, u32 cyc, u32 mask)
{
unsigned long flags;
u64 ns = cd->epoch_ns +
cyc_to_ns((cyc - cd->epoch_cyc) & mask, cd->mult, cd->shift);
/*
* Write epoch_cyc and epoch_ns in a way that the update is
* detectable in cyc_to_fixed_sched_clock().
*/
raw_local_irq_save(flags);
cd->epoch_cyc = cyc;
smp_wmb();
cd->epoch_ns = ns;
smp_wmb();
cd->epoch_cyc_copy = cyc;
raw_local_irq_restore(flags);
}
/*
* If your clock rate is known at compile time, using this will allow
* you to optimize the mult/shift loads away. This is paired with
* init_fixed_sched_clock() to ensure that your mult/shift are correct.
*/
static inline unsigned long long cyc_to_fixed_sched_clock(struct clock_data *cd,
u32 cyc, u32 mask, u32 mult, u32 shift)
{
u64 epoch_ns;
u32 epoch_cyc;
/*
* Load the epoch_cyc and epoch_ns atomically. We do this by
* ensuring that we always write epoch_cyc, epoch_ns and
* epoch_cyc_copy in strict order, and read them in strict order.
* If epoch_cyc and epoch_cyc_copy are not equal, then we're in
* the middle of an update, and we should repeat the load.
*/
do {
epoch_cyc = cd->epoch_cyc;
smp_rmb();
epoch_ns = cd->epoch_ns;
smp_rmb();
} while (epoch_cyc != cd->epoch_cyc_copy);
return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, mult, shift);
}
/*
* Otherwise, you need to use this, which will obtain the mult/shift
* from the clock_data structure. Use init_sched_clock() with this.
*/
static inline unsigned long long cyc_to_sched_clock(struct clock_data *cd,
u32 cyc, u32 mask)
{
return cyc_to_fixed_sched_clock(cd, cyc, mask, cd->mult, cd->shift);
}
/*
* Initialize the clock data - calculate the appropriate multiplier
* and shift. Also setup a timer to ensure that the epoch is refreshed
* at the appropriate time interval, which will call your update
* handler.
*/
void init_sched_clock(struct clock_data *, void (*)(void),
unsigned int, unsigned long);
/*
* Use this initialization function rather than init_sched_clock() if
* you're using cyc_to_fixed_sched_clock, which will warn if your
* constants are incorrect.
*/
static inline void init_fixed_sched_clock(struct clock_data *cd,
void (*update)(void), unsigned int bits, unsigned long rate,
u32 mult, u32 shift)
{
init_sched_clock(cd, update, bits, rate);
if (cd->mult != mult || cd->shift != shift) {
pr_crit("sched_clock: wrong multiply/shift: %u>>%u vs calculated %u>>%u\n"
"sched_clock: fix multiply/shift to avoid scheduler hiccups\n",
mult, shift, cd->mult, cd->shift);
}
}
#endif
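A platform with a free-running 32-bit counter pairs these helpers roughly as
follows (a hedged sketch loosely modelled on the Versatile Express support in
this merge; COUNTER_VADDR and the 24MHz rate are illustrative assumptions):

static DEFINE_CLOCK_DATA(cd);

/* Timer-driven callback: refresh the epoch before the counter wraps. */
static void notrace board_update_sched_clock(void)
{
	u32 cyc = readl(COUNTER_VADDR);
	update_sched_clock(&cd, cyc, (u32)~0);
}

unsigned long long notrace sched_clock(void)
{
	u32 cyc = readl(COUNTER_VADDR);
	return cyc_to_sched_clock(&cd, cyc, (u32)~0);
}

static void __init board_sched_clock_init(void)
{
	/* 32-bit counter running at 24MHz */
	init_sched_clock(&cd, board_update_sched_clock, 32, 24000000);
}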
......@@ -33,27 +33,23 @@ struct seq_file;
/*
* generate IPI list text
*/
extern void show_ipi_list(struct seq_file *p);
extern void show_ipi_list(struct seq_file *, int);
/*
* Called from assembly code, this handles an IPI.
*/
asmlinkage void do_IPI(struct pt_regs *regs);
asmlinkage void do_IPI(int ipinr, struct pt_regs *regs);
/*
* Setup the set of possible CPUs (via set_cpu_possible)
*/
extern void smp_init_cpus(void);
/*
* Move global data into per-processor storage.
*/
extern void smp_store_cpu_info(unsigned int cpuid);
/*
* Raise an IPI cross call on CPUs in callmap.
*/
extern void smp_cross_call(const struct cpumask *mask);
extern void smp_cross_call(const struct cpumask *mask, int ipi);
/*
* Boot a secondary CPU, and assign it the specified idle task.
......@@ -72,6 +68,11 @@ asmlinkage void secondary_start_kernel(void);
*/
extern void platform_secondary_init(unsigned int cpu);
/*
* Initialize cpu_possible map, and enable coherency
*/
extern void platform_smp_prepare_cpus(unsigned int);
/*
* Initial data for bringing up a secondary CPU.
*/
......@@ -97,6 +98,6 @@ extern void arch_send_call_function_ipi_mask(const struct cpumask *mask);
/*
* show local interrupt info
*/
extern void show_local_irqs(struct seq_file *);
extern void show_local_irqs(struct seq_file *, int);
#endif /* ifndef __ASM_ARM_SMP_H */
#ifndef ASMARM_SMP_MIDR_H
#define ASMARM_SMP_MIDR_H
#define hard_smp_processor_id() \
({ \
unsigned int cpunum; \
__asm__("\n" \
"1: mrc p15, 0, %0, c0, c0, 5\n" \
" .pushsection \".alt.smp.init\", \"a\"\n"\
" .long 1b\n" \
" mov %0, #0\n" \
" .popsection" \
: "=r" (cpunum)); \
cpunum &= 0x0F; \
})
#endif
......@@ -22,7 +22,6 @@ struct clock_event_device;
extern void __iomem *twd_base;
void twd_timer_stop(void);
int twd_timer_ack(void);
void twd_timer_setup(struct clock_event_device *);
......
......@@ -63,6 +63,11 @@
#include <asm/outercache.h>
#define __exception __attribute__((section(".exception.text")))
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
#define __exception_irq_entry __irq_entry
#else
#define __exception_irq_entry __exception
#endif
struct thread_info;
struct task_struct;
......@@ -119,6 +124,13 @@ extern unsigned int user_debug;
#define vectors_high() (0)
#endif
#if __LINUX_ARM_ARCH__ >= 7 || \
(__LINUX_ARM_ARCH__ == 6 && defined(CONFIG_CPU_32v6K))
#define sev() __asm__ __volatile__ ("sev" : : : "memory")
#define wfe() __asm__ __volatile__ ("wfe" : : : "memory")
#define wfi() __asm__ __volatile__ ("wfi" : : : "memory")
#endif
#if __LINUX_ARM_ARCH__ >= 7
#define isb() __asm__ __volatile__ ("isb" : : : "memory")
#define dsb() __asm__ __volatile__ ("dsb" : : : "memory")
......
......@@ -15,16 +15,37 @@ struct undef_hook {
void register_undef_hook(struct undef_hook *hook);
void unregister_undef_hook(struct undef_hook *hook);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
static inline int __in_irqentry_text(unsigned long ptr)
{
extern char __irqentry_text_start[];
extern char __irqentry_text_end[];
return ptr >= (unsigned long)&__irqentry_text_start &&
ptr < (unsigned long)&__irqentry_text_end;
}
#else
static inline int __in_irqentry_text(unsigned long ptr)
{
return 0;
}
#endif
static inline int in_exception_text(unsigned long ptr)
{
extern char __exception_text_start[];
extern char __exception_text_end[];
int in;
return ptr >= (unsigned long)&__exception_text_start &&
in = ptr >= (unsigned long)&__exception_text_start &&
ptr < (unsigned long)&__exception_text_end;
return in ? : __in_irqentry_text(ptr);
}
extern void __init early_trap_init(void);
extern void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame);
extern void *vectors_page;
#endif
......@@ -227,7 +227,7 @@ do { \
#define __get_user_asm_byte(x,addr,err) \
__asm__ __volatile__( \
"1: ldrbt %1,[%2]\n" \
"1: " T(ldrb) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
......@@ -263,7 +263,7 @@ do { \
#define __get_user_asm_word(x,addr,err) \
__asm__ __volatile__( \
"1: ldrt %1,[%2]\n" \
"1: " T(ldr) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
......@@ -308,7 +308,7 @@ do { \
#define __put_user_asm_byte(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strbt %1,[%2]\n" \
"1: " T(strb) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
......@@ -341,7 +341,7 @@ do { \
#define __put_user_asm_word(x,__pu_addr,err) \
__asm__ __volatile__( \
"1: strt %1,[%2]\n" \
"1: " T(str) " %1,[%2],#0\n" \
"2:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
......@@ -366,10 +366,10 @@ do { \
#define __put_user_asm_dword(x,__pu_addr,err) \
__asm__ __volatile__( \
ARM( "1: strt " __reg_oper1 ", [%1], #4\n" ) \
ARM( "2: strt " __reg_oper0 ", [%1]\n" ) \
THUMB( "1: strt " __reg_oper1 ", [%1]\n" ) \
THUMB( "2: strt " __reg_oper0 ", [%1, #4]\n" ) \
ARM( "1: " T(str) " " __reg_oper1 ", [%1], #4\n" ) \
ARM( "2: " T(str) " " __reg_oper0 ", [%1]\n" ) \
THUMB( "1: " T(str) " " __reg_oper1 ", [%1]\n" ) \
THUMB( "2: " T(str) " " __reg_oper0 ", [%1, #4]\n" ) \
"3:\n" \
" .pushsection .fixup,\"ax\"\n" \
" .align 2\n" \
......
......@@ -5,7 +5,7 @@
CPPFLAGS_vmlinux.lds := -DTEXT_OFFSET=$(TEXT_OFFSET)
AFLAGS_head.o := -DTEXT_OFFSET=$(TEXT_OFFSET)
ifdef CONFIG_DYNAMIC_FTRACE
ifdef CONFIG_FUNCTION_TRACER
CFLAGS_REMOVE_ftrace.o = -pg
endif
......@@ -29,10 +29,12 @@ obj-$(CONFIG_MODULES) += armksyms.o module.o
obj-$(CONFIG_ARTHUR) += arthur.o
obj-$(CONFIG_ISA_DMA) += dma-isa.o
obj-$(CONFIG_PCI) += bios32.o isa.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_HAVE_SCHED_CLOCK) += sched_clock.o
obj-$(CONFIG_SMP) += smp.o smp_tlb.o
obj-$(CONFIG_HAVE_ARM_SCU) += smp_scu.o
obj-$(CONFIG_HAVE_ARM_TWD) += smp_twd.o
obj-$(CONFIG_DYNAMIC_FTRACE) += ftrace.o
obj-$(CONFIG_FUNCTION_GRAPH_TRACER) += ftrace.o
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
obj-$(CONFIG_KPROBES) += kprobes.o kprobes-decode.o
obj-$(CONFIG_ATAGS_PROC) += atags.o
......@@ -42,6 +44,8 @@ obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_ARM_UNWIND) += unwind.o
obj-$(CONFIG_HAVE_TCM) += tcm.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_SWP_EMULATE) += swp_emulate.o
CFLAGS_swp_emulate.o := -Wa,-march=armv7-a
obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
obj-$(CONFIG_CRUNCH) += crunch.o crunch-bits.o
......@@ -50,6 +54,7 @@ AFLAGS_crunch-bits.o := -Wa,-mcpu=ep9312
obj-$(CONFIG_CPU_XSCALE) += xscale-cp0.o
obj-$(CONFIG_CPU_XSC3) += xscale-cp0.o
obj-$(CONFIG_CPU_MOHAWK) += xscale-cp0.o
obj-$(CONFIG_CPU_PJ4) += pj4-cp0.o
obj-$(CONFIG_IWMMXT) += iwmmxt.o
obj-$(CONFIG_CPU_HAS_PMU) += pmu.o
obj-$(CONFIG_HW_PERF_EVENTS) += perf_event.o
......
......@@ -25,42 +25,22 @@
#include <asm/tls.h>
#include "entry-header.S"
#include <asm/entry-macro-multi.S>
/*
* Interrupt handling. Preserves r7, r8, r9
*/
.macro irq_handler
get_irqnr_preamble r5, lr
1: get_irqnr_and_base r0, r6, r5, lr
movne r1, sp
@
@ routine called with r0 = irq number, r1 = struct pt_regs *
@
adrne lr, BSYM(1b)
bne asm_do_IRQ
#ifdef CONFIG_SMP
/*
* XXX
*
* this macro assumes that irqstat (r6) and base (r5) are
* preserved from get_irqnr_and_base above
*/
ALT_SMP(test_for_ipi r0, r6, r5, lr)
ALT_UP_B(9997f)
movne r0, sp
adrne lr, BSYM(1b)
bne do_IPI
#ifdef CONFIG_LOCAL_TIMERS
test_for_ltirq r0, r6, r5, lr
movne r0, sp
adrne lr, BSYM(1b)
bne do_local_timer
#ifdef CONFIG_MULTI_IRQ_HANDLER
ldr r5, =handle_arch_irq
mov r0, sp
ldr r5, [r5]
adr lr, BSYM(9997f)
teq r5, #0
movne pc, r5
#endif
arch_irq_handler_default
9997:
#endif
.endm
#ifdef CONFIG_KPROBES
......@@ -198,6 +178,7 @@ __dabt_svc:
@
@ set desired IRQ state, then call main handler
@
debug_entry r1
msr cpsr_c, r9
mov r2, sp
bl do_DataAbort
......@@ -324,6 +305,7 @@ __pabt_svc:
#else
bl CPU_PABORT_HANDLER
#endif
debug_entry r1
msr cpsr_c, r9 @ Maybe enable interrupts
mov r2, sp @ regs
bl do_PrefetchAbort @ call abort handler
......@@ -439,6 +421,7 @@ __dabt_usr:
@
@ IRQs on, then call the main handler
@
debug_entry r1
enable_irq
mov r2, sp
adr lr, BSYM(ret_from_exception)
......@@ -703,6 +686,7 @@ __pabt_usr:
#else
bl CPU_PABORT_HANDLER
#endif
debug_entry r1
enable_irq @ Enable interrupts
mov r2, sp @ regs
bl do_PrefetchAbort @ call abort handler
......@@ -735,7 +719,7 @@ ENTRY(__switch_to)
THUMB( stmia ip!, {r4 - sl, fp} ) @ Store most regs on stack
THUMB( str sp, [ip], #4 )
THUMB( str lr, [ip], #4 )
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_USE_DOMAINS
ldr r6, [r2, #TI_CPU_DOMAIN]
#endif
set_tls r3, r4, r5
......@@ -744,7 +728,7 @@ ENTRY(__switch_to)
ldr r8, =__stack_chk_guard
ldr r7, [r7, #TSK_STACK_CANARY]
#endif
#ifdef CONFIG_MMU
#ifdef CONFIG_CPU_USE_DOMAINS
mcr p15, 0, r6, c3, c0, 0 @ Set domain register
#endif
mov r5, r0
......@@ -842,7 +826,7 @@ __kuser_helper_start:
*/
__kuser_memory_barrier: @ 0xffff0fa0
smp_dmb
smp_dmb arm
usr_ret lr
.align 5
......@@ -959,7 +943,7 @@ kuser_cmpxchg_fixup:
#else
smp_dmb
smp_dmb arm
1: ldrex r3, [r2]
subs r3, r3, r0
strexeq r3, r1, [r2]
......@@ -1245,3 +1229,9 @@ cr_alignment:
.space 4
cr_no_alignment:
.space 4
#ifdef CONFIG_MULTI_IRQ_HANDLER
.globl handle_arch_irq
handle_arch_irq:
.space 4
#endif
......@@ -147,98 +147,170 @@ ENDPROC(ret_from_fork)
#endif
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(__gnu_mcount_nc)
mov ip, lr
ldmia sp!, {lr}
mov pc, ip
ENDPROC(__gnu_mcount_nc)
.macro __mcount suffix
mcount_enter
ldr r0, =ftrace_trace_function
ldr r2, [r0]
adr r0, .Lftrace_stub
cmp r0, r2
bne 1f
ENTRY(ftrace_caller)
stmdb sp!, {r0-r3, lr}
mov r0, lr
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ldr r1, =ftrace_graph_return
ldr r2, [r1]
cmp r0, r2
bne ftrace_graph_caller\suffix
ldr r1, =ftrace_graph_entry
ldr r2, [r1]
ldr r0, =ftrace_graph_entry_stub
cmp r0, r2
bne ftrace_graph_caller\suffix
#endif
mcount_exit
1: mcount_get_lr r1 @ lr of instrumented func
mov r0, lr @ instrumented function
sub r0, r0, #MCOUNT_INSN_SIZE
adr lr, BSYM(2f)
mov pc, r2
2: mcount_exit
.endm
.macro __ftrace_caller suffix
mcount_enter
mcount_get_lr r1 @ lr of instrumented func
mov r0, lr @ instrumented function
sub r0, r0, #MCOUNT_INSN_SIZE
ldr r1, [sp, #20]
.global ftrace_call
ftrace_call:
.globl ftrace_call\suffix
ftrace_call\suffix:
bl ftrace_stub
ldmia sp!, {r0-r3, ip, lr}
mov pc, ip
ENDPROC(ftrace_caller)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call\suffix
ftrace_graph_call\suffix:
mov r0, r0
#endif
mcount_exit
.endm
.macro __ftrace_graph_caller
sub r0, fp, #4 @ &lr of instrumented routine (&parent)
#ifdef CONFIG_DYNAMIC_FTRACE
@ called from __ftrace_caller, saved in mcount_enter
ldr r1, [sp, #16] @ instrumented routine (func)
#else
@ called from __mcount, untouched in lr
mov r1, lr @ instrumented routine (func)
#endif
sub r1, r1, #MCOUNT_INSN_SIZE
mov r2, fp @ frame pointer
bl prepare_ftrace_return
mcount_exit
.endm
#ifdef CONFIG_OLD_MCOUNT
/*
* mcount
*/
.macro mcount_enter
stmdb sp!, {r0-r3, lr}
.endm
.macro mcount_get_lr reg
ldr \reg, [fp, #-4]
.endm
.macro mcount_exit
ldr lr, [fp, #-4]
ldmia sp!, {r0-r3, pc}
.endm
ENTRY(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
stmdb sp!, {lr}
ldr lr, [fp, #-4]
ldmia sp!, {pc}
#else
__mcount _old
#endif
ENDPROC(mcount)
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller_old)
stmdb sp!, {r0-r3, lr}
ldr r1, [fp, #-4]
mov r0, lr
sub r0, r0, #MCOUNT_INSN_SIZE
.globl ftrace_call_old
ftrace_call_old:
bl ftrace_stub
ldr lr, [fp, #-4] @ restore lr
ldmia sp!, {r0-r3, pc}
__ftrace_caller _old
ENDPROC(ftrace_caller_old)
#endif
#else
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller_old)
__ftrace_graph_caller
ENDPROC(ftrace_graph_caller_old)
#endif
ENTRY(__gnu_mcount_nc)
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#endif
/*
* __gnu_mcount_nc
*/
.macro mcount_enter
stmdb sp!, {r0-r3, lr}
ldr r0, =ftrace_trace_function
ldr r2, [r0]
adr r0, .Lftrace_stub
cmp r0, r2
bne gnu_trace
.endm
.macro mcount_get_lr reg
ldr \reg, [sp, #20]
.endm
.macro mcount_exit
ldmia sp!, {r0-r3, ip, lr}
mov pc, ip
.endm
gnu_trace:
ldr r1, [sp, #20] @ lr of instrumented routine
mov r0, lr
sub r0, r0, #MCOUNT_INSN_SIZE
adr lr, BSYM(1f)
mov pc, r2
1:
ldmia sp!, {r0-r3, ip, lr}
ENTRY(__gnu_mcount_nc)
#ifdef CONFIG_DYNAMIC_FTRACE
mov ip, lr
ldmia sp!, {lr}
mov pc, ip
#else
__mcount
#endif
ENDPROC(__gnu_mcount_nc)
#ifdef CONFIG_OLD_MCOUNT
/*
* This is under an ifdef in order to force link-time errors for people trying
* to build with !FRAME_POINTER with a GCC which doesn't use the new-style
* mcount.
*/
ENTRY(mcount)
stmdb sp!, {r0-r3, lr}
ldr r0, =ftrace_trace_function
ldr r2, [r0]
adr r0, ftrace_stub
cmp r0, r2
bne trace
ldr lr, [fp, #-4] @ restore lr
ldmia sp!, {r0-r3, pc}
#ifdef CONFIG_DYNAMIC_FTRACE
ENTRY(ftrace_caller)
__ftrace_caller
ENDPROC(ftrace_caller)
#endif
trace:
ldr r1, [fp, #-4] @ lr of instrumented routine
mov r0, lr
sub r0, r0, #MCOUNT_INSN_SIZE
mov lr, pc
mov pc, r2
ldr lr, [fp, #-4] @ restore lr
ldmia sp!, {r0-r3, pc}
ENDPROC(mcount)
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
ENTRY(ftrace_graph_caller)
__ftrace_graph_caller
ENDPROC(ftrace_graph_caller)
#endif
#endif /* CONFIG_DYNAMIC_FTRACE */
.purgem mcount_enter
.purgem mcount_get_lr
.purgem mcount_exit
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl return_to_handler
return_to_handler:
stmdb sp!, {r0-r3}
mov r0, fp @ frame pointer
bl ftrace_return_to_handler
mov lr, r0 @ r0 has real ret addr
ldmia sp!, {r0-r3}
mov pc, lr
#endif
ENTRY(ftrace_stub)
.Lftrace_stub:
......
......@@ -165,6 +165,25 @@
.endm
#endif /* !CONFIG_THUMB2_KERNEL */
@
@ Debug exceptions are taken as prefetch or data aborts.
@ We must disable preemption during the handler so that
@ we can access the debug registers safely.
@
.macro debug_entry, fsr
#if defined(CONFIG_HAVE_HW_BREAKPOINT) && defined(CONFIG_PREEMPT)
ldr r4, =0x40f @ mask out fsr.fs
and r5, r4, \fsr
cmp r5, #2 @ debug exception
bne 1f
get_thread_info r10
ldr r6, [r10, #TI_PREEMPT] @ get preempt count
add r11, r6, #1 @ increment it
str r11, [r10, #TI_PREEMPT]
1:
#endif
.endm
/*
* These are the registers used in the syscall handler, and allow us to
* have in theory up to 7 arguments to a function - r0 to r6.
......
......@@ -45,6 +45,7 @@
#include <asm/fiq.h>
#include <asm/irq.h>
#include <asm/system.h>
#include <asm/traps.h>
static unsigned long no_fiq_insn;
......@@ -67,17 +68,22 @@ static struct fiq_handler default_owner = {
static struct fiq_handler *current_fiq = &default_owner;
int show_fiq_list(struct seq_file *p, void *v)
int show_fiq_list(struct seq_file *p, int prec)
{
if (current_fiq != &default_owner)
seq_printf(p, "FIQ: %s\n", current_fiq->name);
seq_printf(p, "%*s: %s\n", prec, "FIQ",
current_fiq->name);
return 0;
}
void set_fiq_handler(void *start, unsigned int length)
{
#if defined(CONFIG_CPU_USE_DOMAINS)
memcpy((void *)0xffff001c, start, length);
#else
memcpy(vectors_page + 0x1c, start, length);
#endif
flush_icache_range(0xffff001c, 0xffff001c + length);
if (!vectors_high())
flush_icache_range(0x1c, 0x1c + length);
......
......@@ -24,6 +24,7 @@
#define NOP 0xe8bd4000 /* pop {lr} */
#endif
#ifdef CONFIG_DYNAMIC_FTRACE
#ifdef CONFIG_OLD_MCOUNT
#define OLD_MCOUNT_ADDR ((unsigned long) mcount)
#define OLD_FTRACE_ADDR ((unsigned long) ftrace_caller_old)
......@@ -59,9 +60,9 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
}
#endif
/* construct a branch (BL) instruction to addr */
#ifdef CONFIG_THUMB2_KERNEL
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
bool link)
{
unsigned long s, j1, j2, i1, i2, imm10, imm11;
unsigned long first, second;
......@@ -83,15 +84,22 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
j2 = (!i2) ^ s;
first = 0xf000 | (s << 10) | imm10;
second = 0xd000 | (j1 << 13) | (j2 << 11) | imm11;
second = 0x9000 | (j1 << 13) | (j2 << 11) | imm11;
if (link)
second |= 1 << 14;
return (second << 16) | first;
}
#else
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
static unsigned long ftrace_gen_branch(unsigned long pc, unsigned long addr,
bool link)
{
unsigned long opcode = 0xea000000;
long offset;
if (link)
opcode |= 1 << 24;
offset = (long)addr - (long)(pc + 8);
if (unlikely(offset < -33554432 || offset > 33554428)) {
/* Can't generate branches that far (from ARM ARM). Ftrace
......@@ -103,10 +111,15 @@ static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
offset = (offset >> 2) & 0x00ffffff;
return 0xeb000000 | offset;
return opcode | offset;
}
#endif
static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
{
return ftrace_gen_branch(pc, addr, true);
}
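/*
 * Worked example (illustrative): patching a call site at pc = 0xc0008000
 * to branch-and-link to addr = 0xc0008010 gives
 * offset = (0xc0008010 - 0xc0008008) >> 2 = 2, so the instruction is
 * 0xeb000000 | 2 = 0xeb000002 ("bl" reaching pc + 8 + (2 << 2)).
 */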
static int ftrace_modify_code(unsigned long pc, unsigned long old,
unsigned long new)
{
......@@ -193,3 +206,83 @@ int __init ftrace_dyn_arch_init(void *data)
return 0;
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
unsigned long frame_pointer)
{
unsigned long return_hooker = (unsigned long) &return_to_handler;
struct ftrace_graph_ent trace;
unsigned long old;
int err;
if (unlikely(atomic_read(&current->tracing_graph_pause)))
return;
old = *parent;
*parent = return_hooker;
err = ftrace_push_return_trace(old, self_addr, &trace.depth,
frame_pointer);
if (err == -EBUSY) {
*parent = old;
return;
}
trace.func = self_addr;
/* Only trace if the calling function expects to */
if (!ftrace_graph_entry(&trace)) {
current->curr_ret_stack--;
*parent = old;
}
}
#ifdef CONFIG_DYNAMIC_FTRACE
extern unsigned long ftrace_graph_call;
extern unsigned long ftrace_graph_call_old;
extern void ftrace_graph_caller_old(void);
static int __ftrace_modify_caller(unsigned long *callsite,
void (*func) (void), bool enable)
{
unsigned long caller_fn = (unsigned long) func;
unsigned long pc = (unsigned long) callsite;
unsigned long branch = ftrace_gen_branch(pc, caller_fn, false);
unsigned long nop = 0xe1a00000; /* mov r0, r0 */
unsigned long old = enable ? nop : branch;
unsigned long new = enable ? branch : nop;
return ftrace_modify_code(pc, old, new);
}
static int ftrace_modify_graph_caller(bool enable)
{
int ret;
ret = __ftrace_modify_caller(&ftrace_graph_call,
ftrace_graph_caller,
enable);
#ifdef CONFIG_OLD_MCOUNT
if (!ret)
ret = __ftrace_modify_caller(&ftrace_graph_call_old,
ftrace_graph_caller_old,
enable);
#endif
return ret;
}
int ftrace_enable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(true);
}
int ftrace_disable_ftrace_graph_caller(void)
{
return ftrace_modify_graph_caller(false);
}
#endif /* CONFIG_DYNAMIC_FTRACE */
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
......@@ -91,6 +91,11 @@ ENTRY(stext)
movs r8, r5 @ invalid machine (r5=0)?
THUMB( it eq ) @ force fixup-able long branch encoding
beq __error_a @ yes, error 'a'
/*
* r1 = machine no, r2 = atags,
* r8 = machinfo, r9 = cpuid, r10 = procinfo
*/
bl __vet_atags
#ifdef CONFIG_SMP_ON_UP
bl __fixup_smp
......@@ -387,19 +392,19 @@ ENDPROC(__turn_mmu_on)
#ifdef CONFIG_SMP_ON_UP
__fixup_smp:
mov r7, #0x00070000
orr r6, r7, #0xff000000 @ mask 0xff070000
orr r7, r7, #0x41000000 @ val 0x41070000
and r0, r9, r6
teq r0, r7 @ ARM CPU and ARMv6/v7?
mov r4, #0x00070000
orr r3, r4, #0xff000000 @ mask 0xff070000
orr r4, r4, #0x41000000 @ val 0x41070000
and r0, r9, r3
teq r0, r4 @ ARM CPU and ARMv6/v7?
bne __fixup_smp_on_up @ no, assume UP
orr r6, r6, #0x0000ff00
orr r6, r6, #0x000000f0 @ mask 0xff07fff0
orr r7, r7, #0x0000b000
orr r7, r7, #0x00000020 @ val 0x4107b020
and r0, r9, r6
teq r0, r7 @ ARM 11MPCore?
orr r3, r3, #0x0000ff00
orr r3, r3, #0x000000f0 @ mask 0xff07fff0
orr r4, r4, #0x0000b000
orr r4, r4, #0x00000020 @ val 0x4107b020
and r0, r9, r3
teq r0, r4 @ ARM 11MPCore?
moveq pc, lr @ yes, assume SMP
mrc p15, 0, r0, c0, c0, 5 @ read MPIDR
......@@ -408,15 +413,22 @@ __fixup_smp:
__fixup_smp_on_up:
adr r0, 1f
ldmia r0, {r3, r6, r7}
ldmia r0, {r3 - r5}
sub r3, r0, r3
add r6, r6, r3
add r7, r7, r3
2: cmp r6, r7
ldmia r6!, {r0, r4}
strlo r4, [r0, r3]
blo 2b
mov pc, lr
add r4, r4, r3
add r5, r5, r3
2: cmp r4, r5
movhs pc, lr
ldmia r4!, {r0, r6}
ARM( str r6, [r0, r3] )
THUMB( add r0, r0, r3 )
#ifdef __ARMEB__
THUMB( mov r6, r6, ror #16 ) @ Convert word order for big-endian.
#endif
THUMB( strh r6, [r0], #2 ) @ For Thumb-2, store as two halfwords
THUMB( mov r6, r6, lsr #16 ) @ to be robust against misaligned r3.
THUMB( strh r6, [r0] )
b 2b
ENDPROC(__fixup_smp)
.align
......
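__fixup_smp above reduces to two masked compares on the CPU ID register: (midr & 0xff070000) == 0x41070000 selects ARM Ltd. ARMv6/v7 parts, and (midr & 0xff07fff0) == 0x4107b020 identifies the ARM 11MPCore, which is always treated as SMP; anything else falls through to the MPIDR check. A sketch of that classification, illustrative only:

#include <stdint.h>
#include <stdio.h>

/* Mirrors the masked MIDR compares in __fixup_smp (sketch). */
static const char *classify(uint32_t midr)
{
	if ((midr & 0xff070000) != 0x41070000)
		return "not ARM v6/v7: assume UP";
	if ((midr & 0xff07fff0) == 0x4107b020)
		return "ARM 11MPCore: assume SMP";
	return "check MPIDR";
}

int main(void)
{
	printf("%s\n", classify(0x4107b022));	/* example 11MPCore ID */
	return 0;
}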
......@@ -35,8 +35,10 @@
#include <linux/list.h>
#include <linux/kallsyms.h>
#include <linux/proc_fs.h>
#include <linux/ftrace.h>
#include <asm/system.h>
#include <asm/mach/arch.h>
#include <asm/mach/irq.h>
#include <asm/mach/time.h>
......@@ -47,8 +49,6 @@
#define irq_finish(irq) do { } while (0)
#endif
unsigned int arch_nr_irqs;
void (*init_arch_irq)(void) __initdata = NULL;
unsigned long irq_err_count;
int show_interrupts(struct seq_file *p, void *v)
......@@ -57,11 +57,20 @@ int show_interrupts(struct seq_file *p, void *v)
struct irq_desc *desc;
struct irqaction * action;
unsigned long flags;
int prec, n;
for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
n *= 10;
#ifdef CONFIG_SMP
if (prec < 4)
prec = 4;
#endif
if (i == 0) {
char cpuname[12];
seq_printf(p, " ");
seq_printf(p, "%*s ", prec, "");
for_each_present_cpu(cpu) {
sprintf(cpuname, "CPU%d", cpu);
seq_printf(p, " %10s", cpuname);
......@@ -76,7 +85,7 @@ int show_interrupts(struct seq_file *p, void *v)
if (!action)
goto unlock;
seq_printf(p, "%3d: ", i);
seq_printf(p, "%*d: ", prec, i);
for_each_present_cpu(cpu)
seq_printf(p, "%10u ", kstat_irqs_cpu(i, cpu));
seq_printf(p, " %10s", desc->chip->name ? : "-");
......@@ -89,13 +98,15 @@ int show_interrupts(struct seq_file *p, void *v)
raw_spin_unlock_irqrestore(&desc->lock, flags);
} else if (i == nr_irqs) {
#ifdef CONFIG_FIQ
show_fiq_list(p, v);
show_fiq_list(p, prec);
#endif
#ifdef CONFIG_SMP
show_ipi_list(p);
show_local_irqs(p);
show_ipi_list(p, prec);
#endif
#ifdef CONFIG_LOCAL_TIMERS
show_local_irqs(p, prec);
#endif
seq_printf(p, "Err: %10lu\n", irq_err_count);
seq_printf(p, "%*s: %10lu\n", prec, "Err", irq_err_count);
}
return 0;
}
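The prec loop above sizes the label column to the digit count of nr_irqs, with a floor of 3 (4 under SMP so labels such as the IPI rows fit). For example, nr_irqs = 1234 yields prec = 4. A standalone sketch:

#include <stdio.h>

/* Field width for IRQ labels, as computed in show_interrupts (sketch). */
static int irq_label_width(int nr_irqs, int smp)
{
	int prec, n;

	for (prec = 3, n = 1000; prec < 10 && n <= nr_irqs; prec++)
		n *= 10;
	if (smp && prec < 4)
		prec = 4;		/* leave room for e.g. "IPI0" */
	return prec;
}

int main(void)
{
	printf("%d\n", irq_label_width(1234, 0));	/* prints 4 */
	return 0;
}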
......@@ -105,7 +116,8 @@ int show_interrupts(struct seq_file *p, void *v)
* come via this function. Instead, they should provide their
* own 'handler'
*/
asmlinkage void __exception asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
asmlinkage void __exception_irq_entry
asm_do_IRQ(unsigned int irq, struct pt_regs *regs)
{
struct pt_regs *old_regs = set_irq_regs(regs);
......@@ -154,13 +166,13 @@ void set_irq_flags(unsigned int irq, unsigned int iflags)
void __init init_IRQ(void)
{
init_arch_irq();
machine_desc->init_irq();
}
#ifdef CONFIG_SPARSE_IRQ
int __init arch_probe_nr_irqs(void)
{
nr_irqs = arch_nr_irqs ? arch_nr_irqs : NR_IRQS;
nr_irqs = machine_desc->nr_irqs ? machine_desc->nr_irqs : NR_IRQS;
return nr_irqs;
}
#endif
......
......@@ -19,6 +19,14 @@
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#if defined(CONFIG_CPU_PJ4)
#define PJ4(code...) code
#define XSC(code...)
#else
#define PJ4(code...)
#define XSC(code...) code
#endif
#define MMX_WR0 (0x00)
#define MMX_WR1 (0x08)
#define MMX_WR2 (0x10)
......@@ -58,11 +66,17 @@
ENTRY(iwmmxt_task_enable)
mrc p15, 0, r2, c15, c1, 0
tst r2, #0x3 @ CP0 and CP1 accessible?
XSC(mrc p15, 0, r2, c15, c1, 0)
PJ4(mrc p15, 0, r2, c1, c0, 2)
@ CP0 and CP1 accessible?
XSC(tst r2, #0x3)
PJ4(tst r2, #0xf)
movne pc, lr @ if so no business here
orr r2, r2, #0x3 @ enable access to CP0 and CP1
mcr p15, 0, r2, c15, c1, 0
@ enable access to CP0 and CP1
XSC(orr r2, r2, #0x3)
XSC(mcr p15, 0, r2, c15, c1, 0)
PJ4(orr r2, r2, #0xf)
PJ4(mcr p15, 0, r2, c1, c0, 2)
ldr r3, =concan_owner
add r0, r10, #TI_IWMMXT_STATE @ get task Concan save area
......@@ -179,17 +193,26 @@ ENTRY(iwmmxt_task_disable)
teqne r1, r2 @ or specified one?
bne 1f @ no: quit
mrc p15, 0, r4, c15, c1, 0
orr r4, r4, #0x3 @ enable access to CP0 and CP1
mcr p15, 0, r4, c15, c1, 0
@ enable access to CP0 and CP1
XSC(mrc p15, 0, r4, c15, c1, 0)
XSC(orr r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(mrc p15, 0, r4, c1, c0, 2)
PJ4(orr r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mov r0, #0 @ nothing to load
str r0, [r3] @ no more current owner
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
bl concan_save
bic r4, r4, #0x3 @ disable access to CP0 and CP1
mcr p15, 0, r4, c15, c1, 0
@ disable access to CP0 and CP1
XSC(bic r4, r4, #0x3)
XSC(mcr p15, 0, r4, c15, c1, 0)
PJ4(bic r4, r4, #0xf)
PJ4(mcr p15, 0, r4, c1, c0, 2)
mrc p15, 0, r2, c2, c0, 0
mov r2, r2 @ cpwait
......@@ -277,8 +300,11 @@ ENTRY(iwmmxt_task_restore)
*/
ENTRY(iwmmxt_task_switch)
mrc p15, 0, r1, c15, c1, 0
tst r1, #0x3 @ CP0 and CP1 accessible?
XSC(mrc p15, 0, r1, c15, c1, 0)
PJ4(mrc p15, 0, r1, c1, c0, 2)
@ CP0 and CP1 accessible?
XSC(tst r1, #0x3)
PJ4(tst r1, #0xf)
bne 1f @ yes: block them for next task
ldr r2, =concan_owner
......@@ -287,8 +313,11 @@ ENTRY(iwmmxt_task_switch)
teq r2, r3 @ next task owns it?
movne pc, lr @ no: leave Concan disabled
1: eor r1, r1, #3 @ flip Concan access
mcr p15, 0, r1, c15, c1, 0
1: @ flip Concan access
XSC(eor r1, r1, #0x3)
XSC(mcr p15, 0, r1, c15, c1, 0)
PJ4(eor r1, r1, #0xf)
PJ4(mcr p15, 0, r1, c1, c0, 2)
mrc p15, 0, r1, c2, c0, 0
sub pc, lr, r1, lsr #32 @ cpwait and return
......
......@@ -23,6 +23,8 @@ extern unsigned long kexec_indirection_page;
extern unsigned long kexec_mach_type;
extern unsigned long kexec_boot_atags;
static atomic_t waiting_for_crash_ipi;
/*
* Provide a dummy crash_notes definition until crash dump support arrives on arm.
* This prevents breakage of crash_notes attribute in kernel/ksysfs.c.
......@@ -37,9 +39,37 @@ void machine_kexec_cleanup(struct kimage *image)
{
}
void machine_crash_nonpanic_core(void *unused)
{
struct pt_regs regs;
crash_setup_regs(&regs, NULL);
printk(KERN_DEBUG "CPU %u will stop doing anything useful since another CPU has crashed\n",
smp_processor_id());
crash_save_cpu(&regs, smp_processor_id());
flush_cache_all();
atomic_dec(&waiting_for_crash_ipi);
while (1)
cpu_relax();
}
void machine_crash_shutdown(struct pt_regs *regs)
{
unsigned long msecs;
local_irq_disable();
atomic_set(&waiting_for_crash_ipi, num_online_cpus() - 1);
smp_call_function(machine_crash_nonpanic_core, NULL, false);
msecs = 1000; /* Wait at most a second for the other cpus to stop */
while ((atomic_read(&waiting_for_crash_ipi) > 0) && msecs) {
mdelay(1);
msecs--;
}
if (atomic_read(&waiting_for_crash_ipi) > 0)
printk(KERN_WARNING "Non-crashing CPUs did not react to IPI\n");
crash_save_cpu(regs, smp_processor_id());
printk(KERN_INFO "Loading crashdump kernel...\n");
......
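machine_crash_shutdown() above uses a simple rendezvous: an atomic countdown primed with the number of secondary CPUs, an IPI that makes each one save its state and decrement, and a bounded 1000 ms poll so a wedged CPU cannot hang the crash path. The pattern in miniature, as an illustrative userspace sketch rather than kernel code:

#include <stdatomic.h>
#include <stdio.h>

int main(void)
{
	int online_cpus = 4;			/* hypothetical CPU count */
	atomic_int waiting;
	int msecs = 1000;

	atomic_store(&waiting, online_cpus - 1);

	/* The IPI handlers would decrement here; simulate all responding. */
	for (int i = 0; i < online_cpus - 1; i++)
		atomic_fetch_sub(&waiting, 1);

	while (atomic_load(&waiting) > 0 && msecs) {
		/* mdelay(1) in the kernel */
		msecs--;
	}
	if (atomic_load(&waiting) > 0)
		printf("Non-crashing CPUs did not react to IPI\n");
	return 0;
}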
......@@ -67,35 +67,6 @@ int module_frob_arch_sections(Elf_Ehdr *hdr,
char *secstrings,
struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
struct arm_unwind_mapping *maps = mod->arch.map;
for (s = sechdrs; s < sechdrs_end; s++) {
char const *secname = secstrings + s->sh_name;
if (strcmp(".ARM.exidx.init.text", secname) == 0)
maps[ARM_SEC_INIT].unw_sec = s;
else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
maps[ARM_SEC_DEVINIT].unw_sec = s;
else if (strcmp(".ARM.exidx", secname) == 0)
maps[ARM_SEC_CORE].unw_sec = s;
else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
maps[ARM_SEC_EXIT].unw_sec = s;
else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
maps[ARM_SEC_DEVEXIT].unw_sec = s;
else if (strcmp(".init.text", secname) == 0)
maps[ARM_SEC_INIT].sec_text = s;
else if (strcmp(".devinit.text", secname) == 0)
maps[ARM_SEC_DEVINIT].sec_text = s;
else if (strcmp(".text", secname) == 0)
maps[ARM_SEC_CORE].sec_text = s;
else if (strcmp(".exit.text", secname) == 0)
maps[ARM_SEC_EXIT].sec_text = s;
else if (strcmp(".devexit.text", secname) == 0)
maps[ARM_SEC_DEVEXIT].sec_text = s;
}
#endif
return 0;
}
......@@ -300,41 +271,69 @@ apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
return -ENOEXEC;
}
#ifdef CONFIG_ARM_UNWIND
static void register_unwind_tables(struct module *mod)
struct mod_unwind_map {
const Elf_Shdr *unw_sec;
const Elf_Shdr *txt_sec;
};
int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *mod)
{
#ifdef CONFIG_ARM_UNWIND
const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
const Elf_Shdr *s, *sechdrs_end = sechdrs + hdr->e_shnum;
struct mod_unwind_map maps[ARM_SEC_MAX];
int i;
for (i = 0; i < ARM_SEC_MAX; ++i) {
struct arm_unwind_mapping *map = &mod->arch.map[i];
if (map->unw_sec && map->sec_text)
map->unwind = unwind_table_add(map->unw_sec->sh_addr,
map->unw_sec->sh_size,
map->sec_text->sh_addr,
map->sec_text->sh_size);
memset(maps, 0, sizeof(maps));
for (s = sechdrs; s < sechdrs_end; s++) {
const char *secname = secstrs + s->sh_name;
if (!(s->sh_flags & SHF_ALLOC))
continue;
if (strcmp(".ARM.exidx.init.text", secname) == 0)
maps[ARM_SEC_INIT].unw_sec = s;
else if (strcmp(".ARM.exidx.devinit.text", secname) == 0)
maps[ARM_SEC_DEVINIT].unw_sec = s;
else if (strcmp(".ARM.exidx", secname) == 0)
maps[ARM_SEC_CORE].unw_sec = s;
else if (strcmp(".ARM.exidx.exit.text", secname) == 0)
maps[ARM_SEC_EXIT].unw_sec = s;
else if (strcmp(".ARM.exidx.devexit.text", secname) == 0)
maps[ARM_SEC_DEVEXIT].unw_sec = s;
else if (strcmp(".init.text", secname) == 0)
maps[ARM_SEC_INIT].txt_sec = s;
else if (strcmp(".devinit.text", secname) == 0)
maps[ARM_SEC_DEVINIT].txt_sec = s;
else if (strcmp(".text", secname) == 0)
maps[ARM_SEC_CORE].txt_sec = s;
else if (strcmp(".exit.text", secname) == 0)
maps[ARM_SEC_EXIT].txt_sec = s;
else if (strcmp(".devexit.text", secname) == 0)
maps[ARM_SEC_DEVEXIT].txt_sec = s;
}
}
static void unregister_unwind_tables(struct module *mod)
{
int i = ARM_SEC_MAX;
while (--i >= 0)
unwind_table_del(mod->arch.map[i].unwind);
}
#else
static inline void register_unwind_tables(struct module *mod) { }
static inline void unregister_unwind_tables(struct module *mod) { }
for (i = 0; i < ARM_SEC_MAX; i++)
if (maps[i].unw_sec && maps[i].txt_sec)
mod->arch.unwind[i] =
unwind_table_add(maps[i].unw_sec->sh_addr,
maps[i].unw_sec->sh_size,
maps[i].txt_sec->sh_addr,
maps[i].txt_sec->sh_size);
#endif
int
module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
struct module *module)
{
register_unwind_tables(module);
return 0;
}
void
module_arch_cleanup(struct module *mod)
{
unregister_unwind_tables(mod);
#ifdef CONFIG_ARM_UNWIND
int i;
for (i = 0; i < ARM_SEC_MAX; i++)
if (mod->arch.unwind[i])
unwind_table_del(mod->arch.unwind[i]);
#endif
}
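module_finalize() above pairs each .ARM.exidx* index section with its matching text section via a strcmp chain, then registers one unwind table per populated pair; module_arch_cleanup() deletes them again. A table-driven restatement of the pairing, purely illustrative and not how the kernel writes it:

#include <stdio.h>
#include <string.h>

enum { ARM_SEC_INIT, ARM_SEC_DEVINIT, ARM_SEC_CORE, ARM_SEC_EXIT,
       ARM_SEC_DEVEXIT, ARM_SEC_MAX };

/* Which ARM_SEC_* slot a section name belongs to, and whether it is
 * the unwind index or the text section. */
static const struct { const char *name; int sec; int is_unwind; } map[] = {
	{ ".ARM.exidx.init.text",    ARM_SEC_INIT,    1 },
	{ ".ARM.exidx.devinit.text", ARM_SEC_DEVINIT, 1 },
	{ ".ARM.exidx",              ARM_SEC_CORE,    1 },
	{ ".ARM.exidx.exit.text",    ARM_SEC_EXIT,    1 },
	{ ".ARM.exidx.devexit.text", ARM_SEC_DEVEXIT, 1 },
	{ ".init.text",              ARM_SEC_INIT,    0 },
	{ ".devinit.text",           ARM_SEC_DEVINIT, 0 },
	{ ".text",                   ARM_SEC_CORE,    0 },
	{ ".exit.text",              ARM_SEC_EXIT,    0 },
	{ ".devexit.text",           ARM_SEC_DEVEXIT, 0 },
};

static int classify_section(const char *secname, int *is_unwind)
{
	for (unsigned i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (strcmp(map[i].name, secname) == 0) {
			*is_unwind = map[i].is_unwind;
			return map[i].sec;
		}
	return -1;	/* not relevant to unwinding */
}

int main(void)
{
	int is_unwind;
	int sec = classify_section(".ARM.exidx", &is_unwind);
	printf("slot %d, unwind %d\n", sec, is_unwind);	/* slot 2, unwind 1 */
	return 0;
}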
/*
* linux/arch/arm/kernel/pj4-cp0.c
*
* PJ4 iWMMXt coprocessor context switching and handling
*
* Copyright (c) 2010 Marvell International Inc.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/module.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/init.h>
#include <linux/io.h>
#include <asm/thread_notify.h>
static int iwmmxt_do(struct notifier_block *self, unsigned long cmd, void *t)
{
struct thread_info *thread = t;
switch (cmd) {
case THREAD_NOTIFY_FLUSH:
/*
* flush_thread() zeroes thread->fpstate, so no need
* to do anything here.
*
* FALLTHROUGH: Ensure we don't try to overwrite our newly
* initialised state information on the first fault.
*/
case THREAD_NOTIFY_EXIT:
iwmmxt_task_release(thread);
break;
case THREAD_NOTIFY_SWITCH:
iwmmxt_task_switch(thread);
break;
}
return NOTIFY_DONE;
}
static struct notifier_block iwmmxt_notifier_block = {
.notifier_call = iwmmxt_do,
};
static u32 __init pj4_cp_access_read(void)
{
u32 value;
__asm__ __volatile__ (
"mrc p15, 0, %0, c1, c0, 2\n\t"
: "=r" (value));
return value;
}
static void __init pj4_cp_access_write(u32 value)
{
u32 temp;
__asm__ __volatile__ (
"mcr p15, 0, %1, c1, c0, 2\n\t"
"mrc p15, 0, %0, c1, c0, 2\n\t"
"mov %0, %0\n\t"
"sub pc, pc, #4\n\t"
: "=r" (temp) : "r" (value));
}
/*
* Disable CP0/CP1 on boot, and let call_fpe() and the iWMMXt lazy
* switch code handle iWMMXt context switching.
*/
static int __init pj4_cp0_init(void)
{
u32 cp_access;
cp_access = pj4_cp_access_read() & ~0xf;
pj4_cp_access_write(cp_access);
printk(KERN_INFO "PJ4 iWMMXt coprocessor enabled.\n");
elf_hwcap |= HWCAP_IWMMXT;
thread_register_notifier(&iwmmxt_notifier_block);
return 0;
}
late_initcall(pj4_cp0_init);
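On PJ4 the iWMMXt unit is gated by the generic coprocessor access register (CP15 c1, c0, 2) rather than XScale's CPAR, which is why this file and the PJ4() paths in iwmmxt.S mask with 0xf, assumed here to be two access bits each for cp0 and cp1. A sketch of the check:

#include <stdint.h>
#include <stdio.h>

/* Sketch of the access test used above: any of the low four bits set
 * means cp0/cp1 are reachable; mirrors PJ4(tst r2, #0xf) in iwmmxt.S. */
static int iwmmxt_accessible(uint32_t cp_access)
{
	return (cp_access & 0xf) != 0;
}

int main(void)
{
	uint32_t cp_access = 0x5;		/* example boot-time value */
	cp_access &= ~0xf;			/* pj4_cp0_init() clears access */
	printf("%d\n", iwmmxt_accessible(cp_access));		/* 0 */
	printf("%d\n", iwmmxt_accessible(cp_access | 0xf));	/* 1 */
	return 0;
}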
/*
* sched_clock.c: support for extending counters to full 64-bit ns counter
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*/
#include <linux/clocksource.h>
#include <linux/init.h>
#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/timer.h>
#include <asm/sched_clock.h>
static void sched_clock_poll(unsigned long wrap_ticks);
static DEFINE_TIMER(sched_clock_timer, sched_clock_poll, 0, 0);
static void (*sched_clock_update_fn)(void);
static void sched_clock_poll(unsigned long wrap_ticks)
{
mod_timer(&sched_clock_timer, round_jiffies(jiffies + wrap_ticks));
sched_clock_update_fn();
}
void __init init_sched_clock(struct clock_data *cd, void (*update)(void),
unsigned int clock_bits, unsigned long rate)
{
unsigned long r, w;
u64 res, wrap;
char r_unit;
sched_clock_update_fn = update;
/* calculate the mult/shift to convert counter ticks to ns. */
clocks_calc_mult_shift(&cd->mult, &cd->shift, rate, NSEC_PER_SEC, 60);
r = rate;
if (r >= 4000000) {
r /= 1000000;
r_unit = 'M';
} else {
r /= 1000;
r_unit = 'k';
}
/* calculate how many ns until we wrap */
wrap = cyc_to_ns((1ULL << clock_bits) - 1, cd->mult, cd->shift);
do_div(wrap, NSEC_PER_MSEC);
w = wrap;
/* calculate the ns resolution of this counter */
res = cyc_to_ns(1ULL, cd->mult, cd->shift);
pr_info("sched_clock: %u bits at %lu%cHz, resolution %lluns, wraps every %lums\n",
clock_bits, r, r_unit, res, w);
/*
* Start the timer to keep sched_clock() properly updated and
* set the initial epoch.
*/
sched_clock_timer.data = msecs_to_jiffies(w - (w / 10));
sched_clock_poll(sched_clock_timer.data);
/*
* Ensure that sched_clock() starts off at 0ns
*/
cd->epoch_ns = 0;
}
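Everything in this file hinges on the fixed-point conversion ns = (cyc * mult) >> shift, with mult and shift chosen by clocks_calc_mult_shift() to approximate NSEC_PER_SEC / rate. A standalone illustration; the 1 MHz rate and the mult/shift pair below are made-up example values, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* cyc_to_ns, as used by init_sched_clock() (sketch). */
static uint64_t cyc_to_ns(uint64_t cyc, uint32_t mult, uint32_t shift)
{
	return (cyc * mult) >> shift;
}

int main(void)
{
	/* Hypothetical 1 MHz counter: 1 tick = 1000 ns, so pick
	 * mult/shift such that (1 * mult) >> shift == 1000. */
	uint32_t mult = 1000 << 20, shift = 20;
	uint32_t bits = 32;

	/* ns until a 32-bit counter wraps at this rate (~71 minutes). */
	uint64_t wrap = cyc_to_ns((1ULL << bits) - 1, mult, shift);
	printf("resolution %llu ns, wraps after %llu ms\n",
	       (unsigned long long)cyc_to_ns(1, mult, shift),
	       (unsigned long long)(wrap / 1000000));
	return 0;
}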
......@@ -30,12 +30,13 @@
#include <asm/leds.h>
#include <asm/thread_info.h>
#include <asm/stacktrace.h>
#include <asm/mach/arch.h>
#include <asm/mach/time.h>
/*
* Our system timer.
*/
struct sys_timer *system_timer;
static struct sys_timer *system_timer;
#if defined(CONFIG_RTC_DRV_CMOS) || defined(CONFIG_RTC_DRV_CMOS_MODULE)
/* this needs a better home */
......@@ -160,6 +161,7 @@ device_initcall(timer_init_sysfs);
void __init time_init(void)
{
system_timer = machine_desc->timer;
system_timer->init();
}