Commit eb3d3ec5 authored by Linus Torvalds

Merge branch 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm into next

Pull ARM updates from Russell King:

 - Major clean-up of the L2 cache support code.  The existing mess was
   becoming rather unmaintainable through all the additions that others
   have made over time.  This turns it into a much nicer structure, and
   implements a few performance improvements as well.

 - Clean up some of the CP15 control register tweaks for alignment
   support, moving some code and data into alignment.c.

 - DMA properties for ARM, from Santosh and reviewed by DT people.  This
   adds DT properties to specify bus translations we can't discover
   automatically, and to indicate whether devices are coherent.

 - Hibernation support for ARM.

 - Make ftrace work with read-only text in modules.

 - Add suspend support for PJ4B CPUs.

 - Rework interrupt masking for undefined instruction handling, which
   allows us to enable interrupts earlier in the handling of these
   exceptions.

 - Support for big endian page tables.

 - Fix stacktrace support to exclude stacktrace functions from the
   trace, and add a save_stack_trace_regs() implementation so that
   kprobes can record stack traces.

 - Add support for the Cortex-A17 CPU.

 - Remove the last vestiges of ARM710 support.

 - Removal of the ARM "meminfo" structure, finally converting us solely
   to memblock to handle early memory initialisation.

* 'for-linus' of git://ftp.arm.linux.org.uk/~rmk/linux-arm: (142 commits)
  ARM: ensure C page table setup code follows assembly code (part II)
  ARM: ensure C page table setup code follows assembly code
  ARM: consolidate last remaining open-coded alignment trap enable
  ARM: remove global cr_no_alignment
  ARM: remove CPU_CP15 conditional from alignment.c
  ARM: remove unused adjust_cr() function
  ARM: move "noalign" command line option to alignment.c
  ARM: provide common method to clear bits in CPU control register
  ARM: 8025/1: Get rid of meminfo
  ARM: 8060/1: mm: allow sub-architectures to override PCI I/O memory type
  ARM: 8066/1: correction for ARM patch 8031/2
  ARM: 8049/1: ftrace/add save_stack_trace_regs() implementation
  ARM: 8065/1: remove last use of CONFIG_CPU_ARM710
  ARM: 8062/1: Modify ldrt fixup handler to re-execute the userspace instruction
  ARM: 8047/1: rwsem: use asm-generic rwsem implementation
  ARM: l2c: trial at enabling some Cortex-A9 optimisations
  ARM: l2c: add warnings for stuff modifying aux_ctrl register values
  ARM: l2c: print a warning with L2C-310 caches if the cache size is modified
  ARM: l2c: remove old .set_debug method
  ARM: l2c: kill L2X0_AUX_CTRL_MASK before anyone else makes use of this
  ...
parents c3c55a07 bd63ce27
@@ -41,16 +41,9 @@ fffe8000	fffeffff	DTCM mapping area for platforms with
 fffe0000	fffe7fff	ITCM mapping area for platforms with
				ITCM mounted inside the CPU.
-fff00000	fffdffff	Fixmap mapping region.  Addresses provided
+ffc00000	ffdfffff	Fixmap mapping region.  Addresses provided
				by fix_to_virt() will be located here.
-ffc00000	ffefffff	DMA memory mapping region.  Memory returned
-				by the dma_alloc_xxx functions will be
-				dynamically mapped here.
-ff000000	ffbfffff	Reserved for future expansion of DMA
-				mapping region.
 fee00000	feffffff	Mapping of PCI I/O space. This is a static
				mapping within the vmalloc space.
......
@@ -8,6 +8,7 @@ Required properties:
 - compatible : should be one of
	"arm,armv8-pmuv3"
+	"arm,cortex-a17-pmu"
	"arm,cortex-a15-pmu"
	"arm,cortex-a12-pmu"
	"arm,cortex-a9-pmu"
......
@@ -165,12 +165,9 @@ config TRACE_IRQFLAGS_SUPPORT
	bool
	default y

-config RWSEM_GENERIC_SPINLOCK
-	bool
-	default y
-
 config RWSEM_XCHGADD_ALGORITHM
	bool
+	default y

 config ARCH_HAS_ILOG2_U32
	bool
@@ -1089,11 +1086,6 @@ source "arch/arm/firmware/Kconfig"

 source "arch/arm/mm/Kconfig"

-config ARM_NR_BANKS
-	int
-	default 16 if ARCH_EP93XX
-	default 8
-
 config IWMMXT
	bool "Enable iWMMXt support"
	depends on CPU_XSCALE || CPU_XSC3 || CPU_MOHAWK || CPU_PJ4 || CPU_PJ4B
@@ -1214,19 +1206,6 @@ config ARM_ERRATA_742231
	  register of the Cortex-A9 which reduces the linefill issuing
	  capabilities of the processor.

-config PL310_ERRATA_588369
-	bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
-	depends on CACHE_L2X0
-	help
-	  The PL310 L2 cache controller implements three types of Clean &
-	  Invalidate maintenance operations: by Physical Address
-	  (offset 0x7F0), by Index/Way (0x7F8) and by Way (0x7FC).
-	  They are architecturally defined to behave as the execution of a
-	  clean operation followed immediately by an invalidate operation,
-	  both performing to the same memory location. This functionality
-	  is not correctly implemented in PL310 as clean lines are not
-	  invalidated as a result of these operations.
-
 config ARM_ERRATA_643719
	bool "ARM errata: LoUIS bit field in CLIDR register is incorrect"
	depends on CPU_V7 && SMP
@@ -1249,17 +1228,6 @@ config ARM_ERRATA_720789
	  tables. The workaround changes the TLB flushing routines to invalidate
	  entries regardless of the ASID.

-config PL310_ERRATA_727915
-	bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
-	depends on CACHE_L2X0
-	help
-	  PL310 implements the Clean & Invalidate by Way L2 cache maintenance
-	  operation (offset 0x7FC). This operation runs in background so that
-	  PL310 can handle normal accesses while it is in progress. Under very
-	  rare circumstances, due to this erratum, write data can be lost when
-	  PL310 treats a cacheable write transaction during a Clean &
-	  Invalidate by Way operation.
-
 config ARM_ERRATA_743622
	bool "ARM errata: Faulty hazard checking in the Store Buffer may lead to data corruption"
	depends on CPU_V7
@@ -1285,21 +1253,6 @@ config ARM_ERRATA_751472
	  operation is received by a CPU before the ICIALLUIS has completed,
	  potentially leading to corrupted entries in the cache or TLB.

-config PL310_ERRATA_753970
-	bool "PL310 errata: cache sync operation may be faulty"
-	depends on CACHE_PL310
-	help
-	  This option enables the workaround for the 753970 PL310 (r3p0) erratum.
-	  Under some condition the effect of cache sync operation on
-	  the store buffer still remains when the operation completes.
-	  This means that the store buffer is always asked to drain and
-	  this prevents it from merging any further writes. The workaround
-	  is to replace the normal offset of cache sync operation (0x730)
-	  by another offset targeting an unmapped PL310 register 0x740.
-	  This has the same effect as the cache sync operation: store buffer
-	  drain and waiting for all buffers empty.
-
 config ARM_ERRATA_754322
	bool "ARM errata: possible faulty MMU translations following an ASID switch"
	depends on CPU_V7
@@ -1348,18 +1301,6 @@ config ARM_ERRATA_764369
	  relevant cache maintenance functions and sets a specific bit
	  in the diagnostic control register of the SCU.

-config PL310_ERRATA_769419
-	bool "PL310 errata: no automatic Store Buffer drain"
-	depends on CACHE_L2X0
-	help
-	  On revisions of the PL310 prior to r3p2, the Store Buffer does
-	  not automatically drain. This can cause normal, non-cacheable
-	  writes to be retained when the memory system is idle, leading
-	  to suboptimal I/O performance for drivers using coherent DMA.
-	  This option adds a write barrier to the cpu_idle loop so that,
-	  on systems with an outer cache, the store buffer is drained
-	  explicitly.
-
 config ARM_ERRATA_775420
	bool "ARM errata: A data cache maintenance operation which aborts, might lead to deadlock"
	depends on CPU_V7
@@ -2279,6 +2220,11 @@ config ARCH_SUSPEND_POSSIBLE
 config ARM_CPU_SUSPEND
	def_bool PM_SLEEP

+config ARCH_HIBERNATION_POSSIBLE
+	bool
+	depends on MMU
+	default y if ARCH_SUSPEND_POSSIBLE
+
 endmenu

 source "net/Kconfig"
......
@@ -7,6 +7,8 @@
 #define do_extend_cmdline 0
 #endif

+#define NR_BANKS 16
+
 static int node_offset(void *fdt, const char *node_path)
 {
	int offset = fdt_path_offset(fdt, node_path);
......
@@ -36,7 +36,7 @@ axi {
		ranges = <0x40000000 0x40000000 0xa0000000>;

		l2-cache-controller@c0030000 {
-			compatible = "sirf,marco-pl310-cache", "arm,pl310-cache";
+			compatible = "arm,pl310-cache";
			reg = <0xc0030000 0x1000>;
			interrupts = <0 59 0>;
			arm,tag-latency = <1 1 1>;
......
@@ -48,7 +48,7 @@ axi {
		ranges = <0x40000000 0x40000000 0x80000000>;

		l2-cache-controller@80040000 {
-			compatible = "arm,pl310-cache", "sirf,prima2-pl310-cache";
+			compatible = "arm,pl310-cache";
			reg = <0x80040000 0x1000>;
			interrupts = <59>;
			arm,tag-latency = <1 1 1>;
......
@@ -106,14 +106,14 @@ void mcpm_cpu_power_down(void)
	BUG();
 }

-int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster)
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster)
 {
	int ret;

-	if (WARN_ON_ONCE(!platform_ops || !platform_ops->power_down_finish))
+	if (WARN_ON_ONCE(!platform_ops || !platform_ops->wait_for_powerdown))
		return -EUNATCH;
-	ret = platform_ops->power_down_finish(cpu, cluster);
+	ret = platform_ops->wait_for_powerdown(cpu, cluster);
	if (ret)
		pr_warn("%s: cpu %u, cluster %u failed to power down (%d)\n",
			__func__, cpu, cluster, ret);
......
@@ -62,7 +62,7 @@ static int mcpm_cpu_kill(unsigned int cpu)
	cpu_to_pcpu(cpu, &pcpu, &pcluster);

-	return !mcpm_cpu_power_down_finish(pcpu, pcluster);
+	return !mcpm_wait_for_cpu_powerdown(pcpu, pcluster);
 }

 static int mcpm_cpu_disable(unsigned int cpu)
......
@@ -21,6 +21,7 @@ generic-y += parport.h
 generic-y += poll.h
 generic-y += preempt.h
 generic-y += resource.h
+generic-y += rwsem.h
 generic-y += sections.h
 generic-y += segment.h
 generic-y += sembuf.h
......
@@ -312,7 +312,7 @@
 * you cannot return to the original mode.
 */
 .macro safe_svcmode_maskall reg:req
-#if __LINUX_ARM_ARCH__ >= 6
+#if __LINUX_ARM_ARCH__ >= 6 && !defined(CONFIG_CPU_V7M)
	mrs	\reg , cpsr
	eor	\reg, \reg, #HYP_MODE
	tst	\reg, #MODE_MASK
......
@@ -212,7 +212,7 @@ extern void copy_to_user_page(struct vm_area_struct *, struct page *,
 static inline void __flush_icache_all(void)
 {
	__flush_icache_preferred();
-	dsb();
+	dsb(ishst);
 }

 /*
@@ -487,4 +487,6 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);

+void flush_uprobe_xol_access(struct page *page, unsigned long uaddr,
+			     void *kaddr, unsigned long len);
 #endif
@@ -42,24 +42,23 @@
 #ifndef __ASSEMBLY__

 #if __LINUX_ARM_ARCH__ >= 4
-#define vectors_high()	(cr_alignment & CR_V)
+#define vectors_high()	(get_cr() & CR_V)
 #else
 #define vectors_high()	(0)
 #endif

 #ifdef CONFIG_CPU_CP15

-extern unsigned long cr_no_alignment;	/* defined in entry-armv.S */
 extern unsigned long cr_alignment;	/* defined in entry-armv.S */

-static inline unsigned int get_cr(void)
+static inline unsigned long get_cr(void)
 {
-	unsigned int val;
+	unsigned long val;
	asm("mrc p15, 0, %0, c1, c0, 0	@ get CR" : "=r" (val) : : "cc");
	return val;
 }

-static inline void set_cr(unsigned int val)
+static inline void set_cr(unsigned long val)
 {
	asm volatile("mcr p15, 0, %0, c1, c0, 0	@ set CR"
	  : : "r" (val) : "cc");
@@ -80,10 +79,6 @@ static inline void set_auxcr(unsigned int val)
	isb();
 }

-#ifndef CONFIG_SMP
-extern void adjust_cr(unsigned long mask, unsigned long set);
-#endif
-
 #define CPACC_FULL(n)		(3 << (n * 2))
 #define CPACC_SVC(n)		(1 << (n * 2))
 #define CPACC_DISABLE(n)	(0 << (n * 2))
@@ -106,13 +101,17 @@ static inline void set_copro_access(unsigned int val)
 #else /* ifdef CONFIG_CPU_CP15 */

 /*
- * cr_alignment and cr_no_alignment are tightly coupled to cp15 (at least in the
- * minds of the developers). Yielding 0 for machines without a cp15 (and making
- * it read-only) is fine for most cases and saves quite some #ifdeffery.
+ * cr_alignment is tightly coupled to cp15 (at least in the minds of the
+ * developers). Yielding 0 for machines without a cp15 (and making it
+ * read-only) is fine for most cases and saves quite some #ifdeffery.
 */
-#define cr_no_alignment	UL(0)
 #define cr_alignment	UL(0)

+static inline unsigned long get_cr(void)
+{
+	return 0;
+}
+
 #endif	/* ifdef CONFIG_CPU_CP15 / else */

 #endif	/* ifndef __ASSEMBLY__ */
......
@@ -72,6 +72,7 @@
 #define ARM_CPU_PART_CORTEX_A15		0xC0F0
 #define ARM_CPU_PART_CORTEX_A7		0xC070
 #define ARM_CPU_PART_CORTEX_A12		0xC0D0
+#define ARM_CPU_PART_CORTEX_A17		0xC0E0

 #define ARM_CPU_XSCALE_ARCH_MASK	0xe000
 #define ARM_CPU_XSCALE_ARCH_V1		0x2000
......
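The hunk above only adds the Cortex-A17 primary part number (0xC0E0). As a quick illustration of how such part numbers are matched against the MIDR, here is a stand-alone user-space sketch (not kernel code); the example MIDR value and the 0xfff0 mask for bits [15:4] are assumptions used purely for demonstration:

	#include <stdio.h>

	#define ARM_CPU_PART_CORTEX_A15	0xC0F0
	#define ARM_CPU_PART_CORTEX_A17	0xC0E0

	/* The primary part number lives in MIDR bits [15:4], which is why
	 * the constants above all end in a zero nibble. */
	static unsigned int part_number(unsigned int midr)
	{
		return midr & 0xfff0;
	}

	int main(void)
	{
		unsigned int midr = 0x410fc0e0;	/* made-up Cortex-A17 r0p0 MIDR */

		if (part_number(midr) == ARM_CPU_PART_CORTEX_A17)
			printf("Cortex-A17\n");
		else if (part_number(midr) == ARM_CPU_PART_CORTEX_A15)
			printf("Cortex-A15\n");
		else
			printf("other part 0x%04x\n", part_number(midr));
		return 0;
	}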
@@ -58,21 +58,37 @@ static inline int dma_set_mask(struct device *dev, u64 mask)
 #ifndef __arch_pfn_to_dma
 static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
 {
+	if (dev)
+		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
 }

 static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
 {
-	return __bus_to_pfn(addr);
+	unsigned long pfn = __bus_to_pfn(addr);
+
+	if (dev)
+		pfn += dev->dma_pfn_offset;
+
+	return pfn;
 }

 static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
 {
+	if (dev) {
+		unsigned long pfn = dma_to_pfn(dev, addr);
+
+		return phys_to_virt(__pfn_to_phys(pfn));
+	}
+
	return (void *)__bus_to_virt((unsigned long)addr);
 }

 static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
 {
+	if (dev)
+		return pfn_to_dma(dev, virt_to_pfn(addr));
+
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
 }

@@ -105,6 +121,13 @@ static inline unsigned long dma_max_pfn(struct device *dev)
 }
 #define dma_max_pfn(dev) dma_max_pfn(dev)

+static inline int set_arch_dma_coherent_ops(struct device *dev)
+{
+	set_dma_ops(dev, &arm_coherent_dma_ops);
+	return 0;
+}
+#define set_arch_dma_coherent_ops(dev)	set_arch_dma_coherent_ops(dev)
+
 static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
 {
	unsigned int offset = paddr & ~PAGE_MASK;
......
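The hunk above is where the new DT bus-translation support lands in the generic helpers: pfn_to_dma()/dma_to_pfn() now shift by the per-device dev->dma_pfn_offset. As a minimal stand-alone sketch of that arithmetic (user-space, not kernel code; the device struct, the 0x80000000 example layout and the page-shift stand-ins for __pfn_to_bus()/__bus_to_pfn() are all assumptions for illustration):

	#include <stdio.h>

	#define PAGE_SHIFT	12

	struct fake_device {
		unsigned long dma_pfn_offset;	/* pages between CPU and bus view */
	};

	static unsigned long pfn_to_dma(struct fake_device *dev, unsigned long pfn)
	{
		if (dev)
			pfn -= dev->dma_pfn_offset;
		return pfn << PAGE_SHIFT;	/* stand-in for __pfn_to_bus() */
	}

	static unsigned long dma_to_pfn(struct fake_device *dev, unsigned long addr)
	{
		unsigned long pfn = addr >> PAGE_SHIFT;	/* stand-in for __bus_to_pfn() */

		if (dev)
			pfn += dev->dma_pfn_offset;
		return pfn;
	}

	int main(void)
	{
		/* hypothetical bus where RAM at CPU address 0x80000000 appears at bus address 0 */
		struct fake_device dev = { .dma_pfn_offset = 0x80000000UL >> PAGE_SHIFT };
		unsigned long cpu_pfn = 0x80100000UL >> PAGE_SHIFT;
		unsigned long dma = pfn_to_dma(&dev, cpu_pfn);

		printf("cpu pfn 0x%lx -> dma 0x%lx -> pfn 0x%lx\n",
		       cpu_pfn, dma, dma_to_pfn(&dev, dma));
		return 0;
	}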
 #ifndef _ASM_FIXMAP_H
 #define _ASM_FIXMAP_H

-/*
- * Nothing too fancy for now.
- *
- * On ARM we already have well known fixed virtual addresses imposed by
- * the architecture such as the vector page which is located at 0xffff0000,
- * therefore a second level page table is already allocated covering
- * 0xfff00000 upwards.
- *
- * The cache flushing code in proc-xscale.S uses the virtual area between
- * 0xfffe0000 and 0xfffeffff.
- */
-
-#define FIXADDR_START		0xfff00000UL
-#define FIXADDR_TOP		0xfffe0000UL
+#define FIXADDR_START		0xffc00000UL
+#define FIXADDR_TOP		0xffe00000UL
 #define FIXADDR_SIZE		(FIXADDR_TOP - FIXADDR_START)

-#define FIX_KMAP_BEGIN		0
-#define FIX_KMAP_END		(FIXADDR_SIZE >> PAGE_SHIFT)
+#define FIX_KMAP_NR_PTES	(FIXADDR_SIZE >> PAGE_SHIFT)

 #define __fix_to_virt(x)	(FIXADDR_START + ((x) << PAGE_SHIFT))
 #define __virt_to_fix(x)	(((x) - FIXADDR_START) >> PAGE_SHIFT)
@@ -27,7 +14,7 @@ extern void __this_fixmap_does_not_exist(void);
 static inline unsigned long fix_to_virt(const unsigned int idx)
 {
-	if (idx >= FIX_KMAP_END)
+	if (idx >= FIX_KMAP_NR_PTES)
		__this_fixmap_does_not_exist();
	return __fix_to_virt(idx);
 }
......
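With the fixmap window moved to 0xffc00000..0xffe00000 above, the arithmetic is simple: a 2 MiB window of 4 KiB pages gives 512 slots, and fix_to_virt() is just base-plus-page-offset. A stand-alone sketch mirroring the macros (user-space, not kernel code):

	#include <stdio.h>

	#define PAGE_SHIFT	12
	#define FIXADDR_START	0xffc00000UL
	#define FIXADDR_TOP	0xffe00000UL
	#define FIXADDR_SIZE	(FIXADDR_TOP - FIXADDR_START)
	#define FIX_KMAP_NR_PTES (FIXADDR_SIZE >> PAGE_SHIFT)

	static unsigned long fix_to_virt(unsigned int idx)
	{
		return FIXADDR_START + ((unsigned long)idx << PAGE_SHIFT);
	}

	int main(void)
	{
		printf("slots: %lu\n", (unsigned long)FIX_KMAP_NR_PTES);	/* 512 */
		printf("fix_to_virt(0)   = 0x%lx\n", fix_to_virt(0));		/* 0xffc00000 */
		printf("fix_to_virt(511) = 0x%lx\n", fix_to_virt(511));		/* 0xffdff000 */
		return 0;
	}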
@@ -31,14 +31,6 @@
 #undef CPU_DABORT_HANDLER
 #undef MULTI_DABORT

-#if defined(CONFIG_CPU_ARM710)
-# ifdef CPU_DABORT_HANDLER
-#  define MULTI_DABORT 1
-# else
-#  define CPU_DABORT_HANDLER cpu_arm7_data_abort
-# endif
-#endif
-
 #ifdef CONFIG_CPU_ABRT_EV4
 # ifdef CPU_DABORT_HANDLER
 #  define MULTI_DABORT 1
......
...@@ -26,8 +26,8 @@ ...@@ -26,8 +26,8 @@
#define L2X0_CACHE_TYPE 0x004 #define L2X0_CACHE_TYPE 0x004
#define L2X0_CTRL 0x100 #define L2X0_CTRL 0x100
#define L2X0_AUX_CTRL 0x104 #define L2X0_AUX_CTRL 0x104
#define L2X0_TAG_LATENCY_CTRL 0x108 #define L310_TAG_LATENCY_CTRL 0x108
#define L2X0_DATA_LATENCY_CTRL 0x10C #define L310_DATA_LATENCY_CTRL 0x10C
#define L2X0_EVENT_CNT_CTRL 0x200 #define L2X0_EVENT_CNT_CTRL 0x200
#define L2X0_EVENT_CNT1_CFG 0x204 #define L2X0_EVENT_CNT1_CFG 0x204
#define L2X0_EVENT_CNT0_CFG 0x208 #define L2X0_EVENT_CNT0_CFG 0x208
...@@ -54,53 +54,93 @@ ...@@ -54,53 +54,93 @@
#define L2X0_LOCKDOWN_WAY_D_BASE 0x900 #define L2X0_LOCKDOWN_WAY_D_BASE 0x900
#define L2X0_LOCKDOWN_WAY_I_BASE 0x904 #define L2X0_LOCKDOWN_WAY_I_BASE 0x904
#define L2X0_LOCKDOWN_STRIDE 0x08 #define L2X0_LOCKDOWN_STRIDE 0x08
#define L2X0_ADDR_FILTER_START 0xC00 #define L310_ADDR_FILTER_START 0xC00
#define L2X0_ADDR_FILTER_END 0xC04 #define L310_ADDR_FILTER_END 0xC04
#define L2X0_TEST_OPERATION 0xF00 #define L2X0_TEST_OPERATION 0xF00
#define L2X0_LINE_DATA 0xF10 #define L2X0_LINE_DATA 0xF10
#define L2X0_LINE_TAG 0xF30 #define L2X0_LINE_TAG 0xF30
#define L2X0_DEBUG_CTRL 0xF40 #define L2X0_DEBUG_CTRL 0xF40
#define L2X0_PREFETCH_CTRL 0xF60 #define L310_PREFETCH_CTRL 0xF60
#define L2X0_POWER_CTRL 0xF80 #define L310_POWER_CTRL 0xF80
#define L2X0_DYNAMIC_CLK_GATING_EN (1 << 1) #define L310_DYNAMIC_CLK_GATING_EN (1 << 1)
#define L2X0_STNDBY_MODE_EN (1 << 0) #define L310_STNDBY_MODE_EN (1 << 0)
/* Registers shifts and masks */ /* Registers shifts and masks */
#define L2X0_CACHE_ID_PART_MASK (0xf << 6) #define L2X0_CACHE_ID_PART_MASK (0xf << 6)
#define L2X0_CACHE_ID_PART_L210 (1 << 6) #define L2X0_CACHE_ID_PART_L210 (1 << 6)
#define L2X0_CACHE_ID_PART_L220 (2 << 6)
#define L2X0_CACHE_ID_PART_L310 (3 << 6) #define L2X0_CACHE_ID_PART_L310 (3 << 6)
#define L2X0_CACHE_ID_RTL_MASK 0x3f #define L2X0_CACHE_ID_RTL_MASK 0x3f
#define L2X0_CACHE_ID_RTL_R0P0 0x0 #define L210_CACHE_ID_RTL_R0P2_02 0x00
#define L2X0_CACHE_ID_RTL_R1P0 0x2 #define L210_CACHE_ID_RTL_R0P1 0x01
#define L2X0_CACHE_ID_RTL_R2P0 0x4 #define L210_CACHE_ID_RTL_R0P2_01 0x02
#define L2X0_CACHE_ID_RTL_R3P0 0x5 #define L210_CACHE_ID_RTL_R0P3 0x03
#define L2X0_CACHE_ID_RTL_R3P1 0x6 #define L210_CACHE_ID_RTL_R0P4 0x0b
#define L2X0_CACHE_ID_RTL_R3P2 0x8 #define L210_CACHE_ID_RTL_R0P5 0x0f
#define L220_CACHE_ID_RTL_R1P7_01REL0 0x06
#define L310_CACHE_ID_RTL_R0P0 0x00
#define L310_CACHE_ID_RTL_R1P0 0x02
#define L310_CACHE_ID_RTL_R2P0 0x04
#define L310_CACHE_ID_RTL_R3P0 0x05
#define L310_CACHE_ID_RTL_R3P1 0x06
#define L310_CACHE_ID_RTL_R3P1_50REL0 0x07
#define L310_CACHE_ID_RTL_R3P2 0x08
#define L310_CACHE_ID_RTL_R3P3 0x09
#define L2X0_AUX_CTRL_MASK 0xc0000fff /* L2C auxiliary control register - bits common to L2C-210/220/310 */
#define L2C_AUX_CTRL_WAY_SIZE_SHIFT 17
#define L2C_AUX_CTRL_WAY_SIZE_MASK (7 << 17)
#define L2C_AUX_CTRL_WAY_SIZE(n) ((n) << 17)
#define L2C_AUX_CTRL_EVTMON_ENABLE BIT(20)
#define L2C_AUX_CTRL_PARITY_ENABLE BIT(21)
#define L2C_AUX_CTRL_SHARED_OVERRIDE BIT(22)
/* L2C-210/220 common bits */
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0 #define L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT 0
#define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK 0x7 #define L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK (7 << 0)
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3 #define L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT 3
#define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (0x7 << 3) #define L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK (7 << 3)
#define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6 #define L2X0_AUX_CTRL_TAG_LATENCY_SHIFT 6
#define L2X0_AUX_CTRL_TAG_LATENCY_MASK (0x7 << 6) #define L2X0_AUX_CTRL_TAG_LATENCY_MASK (7 << 6)
#define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9 #define L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT 9
#define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (0x7 << 9) #define L2X0_AUX_CTRL_DIRTY_LATENCY_MASK (7 << 9)
#define L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT 16 #define L2X0_AUX_CTRL_ASSOC_SHIFT 13
#define L2X0_AUX_CTRL_WAY_SIZE_SHIFT 17 #define L2X0_AUX_CTRL_ASSOC_MASK (15 << 13)
#define L2X0_AUX_CTRL_WAY_SIZE_MASK (0x7 << 17) /* L2C-210 specific bits */
#define L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT 22 #define L210_AUX_CTRL_WRAP_DISABLE BIT(12)
#define L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT 26 #define L210_AUX_CTRL_WA_OVERRIDE BIT(23)
#define L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT 27 #define L210_AUX_CTRL_EXCLUSIVE_ABORT BIT(24)
#define L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT 28 /* L2C-220 specific bits */
#define L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT 29 #define L220_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
#define L2X0_AUX_CTRL_EARLY_BRESP_SHIFT 30 #define L220_AUX_CTRL_FWA_SHIFT 23
#define L220_AUX_CTRL_FWA_MASK (3 << 23)
#define L220_AUX_CTRL_NS_LOCKDOWN BIT(26)
#define L220_AUX_CTRL_NS_INT_CTRL BIT(27)
/* L2C-310 specific bits */
#define L310_AUX_CTRL_FULL_LINE_ZERO BIT(0) /* R2P0+ */
#define L310_AUX_CTRL_HIGHPRIO_SO_DEV BIT(10) /* R2P0+ */
#define L310_AUX_CTRL_STORE_LIMITATION BIT(11) /* R2P0+ */
#define L310_AUX_CTRL_EXCLUSIVE_CACHE BIT(12)
#define L310_AUX_CTRL_ASSOCIATIVITY_16 BIT(16)
#define L310_AUX_CTRL_CACHE_REPLACE_RR BIT(25) /* R2P0+ */
#define L310_AUX_CTRL_NS_LOCKDOWN BIT(26)
#define L310_AUX_CTRL_NS_INT_CTRL BIT(27)
#define L310_AUX_CTRL_DATA_PREFETCH BIT(28)
#define L310_AUX_CTRL_INSTR_PREFETCH BIT(29)
#define L310_AUX_CTRL_EARLY_BRESP BIT(30) /* R2P0+ */
#define L2X0_LATENCY_CTRL_SETUP_SHIFT 0 #define L310_LATENCY_CTRL_SETUP(n) ((n) << 0)
#define L2X0_LATENCY_CTRL_RD_SHIFT 4 #define L310_LATENCY_CTRL_RD(n) ((n) << 4)
#define L2X0_LATENCY_CTRL_WR_SHIFT 8 #define L310_LATENCY_CTRL_WR(n) ((n) << 8)
#define L2X0_ADDR_FILTER_EN 1 #define L310_ADDR_FILTER_EN 1
#define L310_PREFETCH_CTRL_OFFSET_MASK 0x1f
#define L310_PREFETCH_CTRL_DBL_LINEFILL_INCR BIT(23)
#define L310_PREFETCH_CTRL_PREFETCH_DROP BIT(24)
#define L310_PREFETCH_CTRL_DBL_LINEFILL_WRAP BIT(27)
#define L310_PREFETCH_CTRL_DATA_PREFETCH BIT(28)
#define L310_PREFETCH_CTRL_INSTR_PREFETCH BIT(29)
#define L310_PREFETCH_CTRL_DBL_LINEFILL BIT(30)
#define L2X0_CTRL_EN 1 #define L2X0_CTRL_EN 1
......
@@ -18,6 +18,7 @@
	} while (0)

 extern pte_t *pkmap_page_table;
+extern pte_t *fixmap_page_table;

 extern void *kmap_high(struct page *page);
 extern void kunmap_high(struct page *page);
......
@@ -179,6 +179,12 @@ static inline void __iomem *__typesafe_io(unsigned long addr)
 /* PCI fixed i/o mapping */
 #define PCI_IO_VIRT_BASE	0xfee00000

+#if defined(CONFIG_PCI)
+void pci_ioremap_set_mem_type(int mem_type);
+#else
+static inline void pci_ioremap_set_mem_type(int mem_type) {}
+#endif
+
 extern int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr);

 /*
......
@@ -14,7 +14,6 @@
 #include <linux/reboot.h>

 struct tag;
-struct meminfo;
 struct pt_regs;
 struct smp_operations;
 #ifdef CONFIG_SMP
@@ -45,10 +44,12 @@ struct machine_desc {
	unsigned char		reserve_lp1 :1;	/* never has lp1	*/
	unsigned char		reserve_lp2 :1;	/* never has lp2	*/
	enum reboot_mode	reboot_mode;	/* default restart mode	*/
+	unsigned		l2c_aux_val;	/* L2 cache aux value	*/
+	unsigned		l2c_aux_mask;	/* L2 cache aux mask	*/
+	void			(*l2c_write_sec)(unsigned long, unsigned);
	struct smp_operations	*smp;		/* SMP operations	*/
	bool			(*smp_init)(void);
-	void			(*fixup)(struct tag *, char **,
-					 struct meminfo *);
+	void			(*fixup)(struct tag *, char **);
	void			(*init_meminfo)(void);
	void			(*reserve)(void);/* reserve mem blocks	*/
	void			(*map_io)(void);/* IO mapping function	*/
......
@@ -98,14 +98,14 @@ int mcpm_cpu_power_up(unsigned int cpu, unsigned int cluster);
 * previously in which case the caller should take appropriate action.
 *
 * On success, the CPU is not guaranteed to be truly halted until
- * mcpm_cpu_power_down_finish() subsequently returns non-zero for the
+ * mcpm_wait_for_cpu_powerdown() subsequently returns non-zero for the
 * specified cpu.  Until then, other CPUs should make sure they do not
 * trash memory the target CPU might be executing/accessing.
 */
 void mcpm_cpu_power_down(void);

 /**
- * mcpm_cpu_power_down_finish - wait for a specified CPU to halt, and
+ * mcpm_wait_for_cpu_powerdown - wait for a specified CPU to halt, and
 *	make sure it is powered off
 *
 * @cpu: CPU number within given cluster
@@ -127,7 +127,7 @@ void mcpm_cpu_power_down(void);
 * - zero if the CPU is in a safely parked state
 * - nonzero otherwise (e.g., timeout)
 */
-int mcpm_cpu_power_down_finish(unsigned int cpu, unsigned int cluster);
+int mcpm_wait_for_cpu_powerdown(unsigned int cpu, unsigned int cluster);

 /**
 * mcpm_cpu_suspend - bring the calling CPU in a suspended state
@@ -171,7 +171,7 @@ int mcpm_cpu_powered_up(void);
 struct mcpm_platform_ops {
	int (*power_up)(unsigned int cpu, unsigned int cluster);
	void (*power_down)(void);
-	int (*power_down_finish)(unsigned int cpu, unsigned int cluster);
+	int (*wait_for_powerdown)(unsigned int cpu, unsigned int cluster);
	void (*suspend)(u64);
	void (*powered_up)(void);
 };
......
 #ifndef _ASM_ARM_MEMBLOCK_H
 #define _ASM_ARM_MEMBLOCK_H

-struct meminfo;
 struct machine_desc;

-void arm_memblock_init(struct meminfo *, const struct machine_desc *);
+void arm_memblock_init(const struct machine_desc *);

 phys_addr_t arm_memblock_steal(phys_addr_t size, phys_addr_t align);

 #endif
@@ -83,8 +83,6 @@
 */
 #define IOREMAP_MAX_ORDER	24

-#define CONSISTENT_END		(0xffe00000UL)
-
 #else /* CONFIG_MMU */

 /*
......
...@@ -28,53 +28,84 @@ struct outer_cache_fns { ...@@ -28,53 +28,84 @@ struct outer_cache_fns {
void (*clean_range)(unsigned long, unsigned long); void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long); void (*flush_range)(unsigned long, unsigned long);
void (*flush_all)(void); void (*flush_all)(void);
void (*inv_all)(void);
void (*disable)(void); void (*disable)(void);
#ifdef CONFIG_OUTER_CACHE_SYNC #ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void); void (*sync)(void);
#endif #endif
void (*set_debug)(unsigned long);
void (*resume)(void); void (*resume)(void);
/* This is an ARM L2C thing */
void (*write_sec)(unsigned long, unsigned);
}; };
extern struct outer_cache_fns outer_cache; extern struct outer_cache_fns outer_cache;
#ifdef CONFIG_OUTER_CACHE #ifdef CONFIG_OUTER_CACHE
/**
* outer_inv_range - invalidate range of outer cache lines
* @start: starting physical address, inclusive
* @end: end physical address, exclusive
*/
static inline void outer_inv_range(phys_addr_t start, phys_addr_t end) static inline void outer_inv_range(phys_addr_t start, phys_addr_t end)
{ {
if (outer_cache.inv_range) if (outer_cache.inv_range)
outer_cache.inv_range(start, end); outer_cache.inv_range(start, end);
} }
/**
* outer_clean_range - clean dirty outer cache lines
* @start: starting physical address, inclusive
* @end: end physical address, exclusive
*/
static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
{ {
if (outer_cache.clean_range) if (outer_cache.clean_range)
outer_cache.clean_range(start, end); outer_cache.clean_range(start, end);
} }
/**
* outer_flush_range - clean and invalidate outer cache lines
* @start: starting physical address, inclusive
* @end: end physical address, exclusive
*/
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{ {
if (outer_cache.flush_range) if (outer_cache.flush_range)
outer_cache.flush_range(start, end); outer_cache.flush_range(start, end);
} }
/**
* outer_flush_all - clean and invalidate all cache lines in the outer cache
*
* Note: depending on implementation, this may not be atomic - it must
* only be called with interrupts disabled and no other active outer
* cache masters.
*
* It is intended that this function is only used by implementations
* needing to override the outer_cache.disable() method due to security.
* (Some implementations perform this as a clean followed by an invalidate.)
*/
static inline void outer_flush_all(void) static inline void outer_flush_all(void)
{ {
if (outer_cache.flush_all) if (outer_cache.flush_all)
outer_cache.flush_all(); outer_cache.flush_all();
} }
static inline void outer_inv_all(void) /**
{ * outer_disable - clean, invalidate and disable the outer cache
if (outer_cache.inv_all) *
outer_cache.inv_all(); * Disable the outer cache, ensuring that any data contained in the outer
} * cache is pushed out to lower levels of system memory. The note and
* conditions above concerning outer_flush_all() applies here.
static inline void outer_disable(void) */
{ extern void outer_disable(void);
if (outer_cache.disable)
outer_cache.disable();
}
/**
* outer_resume - restore the cache configuration and re-enable outer cache
*
* Restore any configuration that the cache had when previously enabled,
* and re-enable the outer cache.
*/
static inline void outer_resume(void) static inline void outer_resume(void)
{ {
if (outer_cache.resume) if (outer_cache.resume)
...@@ -90,13 +121,18 @@ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end) ...@@ -90,13 +121,18 @@ static inline void outer_clean_range(phys_addr_t start, phys_addr_t end)
static inline void outer_flush_range(phys_addr_t start, phys_addr_t end) static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
{ } { }
static inline void outer_flush_all(void) { } static inline void outer_flush_all(void) { }
static inline void outer_inv_all(void) { }
static inline void outer_disable(void) { } static inline void outer_disable(void) { }
static inline void outer_resume(void) { } static inline void outer_resume(void) { }
#endif #endif
#ifdef CONFIG_OUTER_CACHE_SYNC #ifdef CONFIG_OUTER_CACHE_SYNC
/**
* outer_sync - perform a sync point for outer cache
*
* Ensure that all outer cache operations are complete and any store
* buffers are drained.
*/
static inline void outer_sync(void) static inline void outer_sync(void)
{ {
if (outer_cache.sync) if (outer_cache.sync)
......
@@ -21,34 +21,6 @@
 #define __tagtable(tag, fn) \
 static const struct tagtable __tagtable_##fn __tag = { tag, fn }

-/*
- * Memory map description
- */
-#define NR_BANKS	CONFIG_ARM_NR_BANKS
-
-struct membank {
-	phys_addr_t start;
-	phys_addr_t size;
-	unsigned int highmem;
-};
-
-struct meminfo {
-	int nr_banks;
-	struct membank bank[NR_BANKS];
-};
-
-extern struct meminfo meminfo;
-
-#define for_each_bank(iter,mi)				\
-	for (iter = 0; iter < (mi)->nr_banks; iter++)
-
-#define bank_pfn_start(bank)	__phys_to_pfn((bank)->start)
-#define bank_pfn_end(bank)	__phys_to_pfn((bank)->start + (bank)->size)
-#define bank_pfn_size(bank)	((bank)->size >> PAGE_SHIFT)
-#define bank_phys_start(bank)	(bank)->start
-#define bank_phys_end(bank)	((bank)->start + (bank)->size)
-#define bank_phys_size(bank)	(bank)->size
-
 extern int arm_add_memory(u64 start, u64 size);
 extern void early_print(const char *str, ...);
 extern void dump_machine_table(void);
......
@@ -39,6 +39,7 @@ obj-$(CONFIG_ARTHUR)		+= arthur.o
 obj-$(CONFIG_ISA_DMA)		+= dma-isa.o
 obj-$(CONFIG_PCI)		+= bios32.o isa.o
 obj-$(CONFIG_ARM_CPU_SUSPEND)	+= sleep.o suspend.o
+obj-$(CONFIG_HIBERNATION)	+= hibernate.o
 obj-$(CONFIG_SMP)		+= smp.o
 ifdef CONFIG_MMU
 obj-$(CONFIG_SMP)		+= smp_tlb.o
......
@@ -22,6 +22,7 @@
 #include <linux/fs.h>
 #include <linux/root_dev.h>
 #include <linux/screen_info.h>
+#include <linux/memblock.h>

 #include <asm/setup.h>
 #include <asm/system_info.h>
@@ -222,10 +223,10 @@ setup_machine_tags(phys_addr_t __atags_pointer, unsigned int machine_nr)
	}

	if (mdesc->fixup)
-		mdesc->fixup(tags, &from, &meminfo);
+		mdesc->fixup(tags, &from);

	if (tags->hdr.tag == ATAG_CORE) {
-		if (meminfo.nr_banks != 0)
+		if (memblock_phys_mem_size())
			squash_mem_tags(tags);
		save_atags(tags);
		parse_tags(tags);
......
@@ -27,10 +27,6 @@
 #include <asm/mach/arch.h>
 #include <asm/mach-types.h>

-void __init early_init_dt_add_memory_arch(u64 base, u64 size)
-{
-	arm_add_memory(base, size);
-}
-
 #ifdef CONFIG_SMP
 extern struct of_cpu_method __cpu_method_of_table[];
......
@@ -344,7 +344,7 @@ ENDPROC(__pabt_svc)
	@
	@ Enable the alignment trap while in kernel mode
	@
-	alignment_trap r0
+	alignment_trap r0, .LCcralign
	@
	@ Clear FP to mark the first stack frame
@@ -413,6 +413,11 @@ __und_usr:
	@
	adr	r9, BSYM(ret_from_exception)

+	@ IRQs must be enabled before attempting to read the instruction from
+	@ user space since that could cause a page/translation fault if the
+	@ page table was modified by another CPU.
+	enable_irq
+
	tst	r3, #PSR_T_BIT			@ Thumb mode?
	bne	__und_usr_thumb
	sub	r4, r2, #4			@ ARM instr at LR - 4
@@ -484,7 +489,8 @@ ENDPROC(__und_usr)
 */
	.pushsection .fixup, "ax"
	.align	2
-4:	mov	pc, r9
+4:	str	r4, [sp, #S_PC]			@ retry current instruction
+	mov	pc, r9
	.popsection
	.pushsection __ex_table,"a"
	.long	1b, 4b
@@ -517,7 +523,7 @@ ENDPROC(__und_usr)
 * r9  = normal "successful" return address
 * r10 = this threads thread_info structure
 * lr  = unrecognised instruction return address
- * IRQs disabled, FIQs enabled.
+ * IRQs enabled, FIQs enabled.
 */
	@
	@ Fall-through from Thumb-2 __und_usr
@@ -624,7 +630,6 @@ call_fpe:
 #endif

 do_fpe:
-	enable_irq
	ldr	r4, .LCfp
	add	r10, r10, #TI_FPSTATE		@ r10 = workspace
	ldr	pc, [r4]			@ Call FP module USR entry point
@@ -652,8 +657,7 @@ __und_usr_fault_32:
	b	1f
 __und_usr_fault_16:
	mov	r1, #2
-1:	enable_irq
-	mov	r0, sp
+1:	mov	r0, sp
	adr	lr, BSYM(ret_from_exception)
	b	__und_fault
 ENDPROC(__und_usr_fault_32)
@@ -1143,11 +1147,8 @@ __vectors_start:
	.data

	.globl	cr_alignment
-	.globl	cr_no_alignment
 cr_alignment:
	.space	4
-cr_no_alignment:
-	.space	4

 #ifdef CONFIG_MULTI_IRQ_HANDLER
	.globl	handle_arch_irq
......
@@ -365,13 +365,7 @@ ENTRY(vector_swi)
	str	r0, [sp, #S_OLD_R0]		@ Save OLD_R0
 #endif
	zero_fp
-
-#ifdef CONFIG_ALIGNMENT_TRAP
-	ldr	ip, __cr_alignment
-	ldr	ip, [ip]
-	mcr	p15, 0, ip, c1, c0		@ update control register
-#endif
-
+	alignment_trap ip, __cr_alignment
	enable_irq
	ct_user_exit

	get_thread_info tsk
......
@@ -37,9 +37,9 @@
 #endif
	.endm

-	.macro	alignment_trap, rtemp
+	.macro	alignment_trap, rtemp, label
 #ifdef CONFIG_ALIGNMENT_TRAP
-	ldr	\rtemp, .LCcralign
+	ldr	\rtemp, \label
	ldr	\rtemp, [\rtemp]
	mcr	p15, 0, \rtemp, c1, c0
 #endif
......
@@ -14,6 +14,7 @@
 #include <linux/ftrace.h>
 #include <linux/uaccess.h>
+#include <linux/module.h>

 #include <asm/cacheflush.h>
 #include <asm/opcodes.h>
@@ -63,6 +64,18 @@ static unsigned long adjust_address(struct dyn_ftrace *rec, unsigned long addr)
 }
 #endif

+int ftrace_arch_code_modify_prepare(void)
+{
+	set_all_modules_text_rw();
+	return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void)
+{
+	set_all_modules_text_ro();
+	return 0;
+}
+
 static unsigned long ftrace_call_replace(unsigned long pc, unsigned long addr)
 {
	return arm_gen_branch_link(pc, addr);
......
@@ -99,8 +99,7 @@ __mmap_switched:
	str	r1, [r5]			@ Save machine type
	str	r2, [r6]			@ Save atags pointer
	cmp	r7, #0
-	bicne	r4, r0, #CR_A			@ Clear 'A' bit
-	stmneia	r7, {r0, r4}			@ Save control register values
+	strne	r0, [r7]			@ Save control register values
	b	start_kernel
 ENDPROC(__mmap_switched)
......
@@ -475,7 +475,7 @@ ENDPROC(__turn_mmu_on)

 #ifdef CONFIG_SMP_ON_UP
-	__INIT
+	__HEAD
 __fixup_smp:
	and	r3, r9, #0x000f0000	@ architecture version
	teq	r3, #0x000f0000		@ CPU ID supported?
......
/*
* Hibernation support specific for ARM
*
* Derived from work on ARM hibernation support by:
*
* Ubuntu project, hibernation support for mach-dove
* Copyright (C) 2010 Nokia Corporation (Hiroshi Doyu)
* Copyright (C) 2010 Texas Instruments, Inc. (Teerth Reddy et al.)
* https://lkml.org/lkml/2010/6/18/4
* https://lists.linux-foundation.org/pipermail/linux-pm/2010-June/027422.html
* https://patchwork.kernel.org/patch/96442/
*
* Copyright (C) 2006 Rafael J. Wysocki <rjw@sisk.pl>
*
* License terms: GNU General Public License (GPL) version 2
*/
#include <linux/mm.h>
#include <linux/suspend.h>
#include <asm/system_misc.h>
#include <asm/idmap.h>
#include <asm/suspend.h>
#include <asm/memory.h>
extern const void __nosave_begin, __nosave_end;
int pfn_is_nosave(unsigned long pfn)
{
unsigned long nosave_begin_pfn = virt_to_pfn(&__nosave_begin);
unsigned long nosave_end_pfn = virt_to_pfn(&__nosave_end - 1);
return (pfn >= nosave_begin_pfn) && (pfn <= nosave_end_pfn);
}
void notrace save_processor_state(void)
{
WARN_ON(num_online_cpus() != 1);
local_fiq_disable();
}
void notrace restore_processor_state(void)
{
local_fiq_enable();
}
/*
* Snapshot kernel memory and reset the system.
*
* swsusp_save() is executed in the suspend finisher so that the CPU
* context pointer and memory are part of the saved image, which is
* required by the resume kernel image to restart execution from
* swsusp_arch_suspend().
*
* soft_restart is not technically needed, but is used to get success
* returned from cpu_suspend.
*
* When soft reboot completes, the hibernation snapshot is written out.
*/
static int notrace arch_save_image(unsigned long unused)
{
int ret;
ret = swsusp_save();
if (ret == 0)
soft_restart(virt_to_phys(cpu_resume));
return ret;
}
/*
* Save the current CPU state before suspend / poweroff.
*/
int notrace swsusp_arch_suspend(void)
{
return cpu_suspend(0, arch_save_image);
}
/*
* Restore page contents for physical pages that were in use during loading
* hibernation image. Switch to idmap_pgd so the physical page tables
* are overwritten with the same contents.
*/
static void notrace arch_restore_image(void *unused)
{
struct pbe *pbe;
cpu_switch_mm(idmap_pgd, &init_mm);
for (pbe = restore_pblist; pbe; pbe = pbe->next)
copy_page(pbe->orig_address, pbe->address);
soft_restart(virt_to_phys(cpu_resume));
}
static u64 resume_stack[PAGE_SIZE/2/sizeof(u64)] __nosavedata;
/*
* Resume from the hibernation image.
* Due to the kernel heap / data restore, stack contents change underneath
* and that would make function calls impossible; switch to a temporary
* stack within the nosave region to avoid that problem.
*/
int swsusp_arch_resume(void)
{
extern void call_with_stack(void (*fn)(void *), void *arg, void *sp);
call_with_stack(arch_restore_image, 0,
resume_stack + ARRAY_SIZE(resume_stack));
return 0;
}
@@ -37,6 +37,7 @@
 #include <linux/proc_fs.h>
 #include <linux/export.h>

+#include <asm/hardware/cache-l2x0.h>
 #include <asm/exception.h>
 #include <asm/mach/arch.h>
 #include <asm/mach/irq.h>
@@ -115,10 +116,21 @@ EXPORT_SYMBOL_GPL(set_irq_flags);

 void __init init_IRQ(void)
 {
+	int ret;
+
	if (IS_ENABLED(CONFIG_OF) && !machine_desc->init_irq)
		irqchip_init();
	else
		machine_desc->init_irq();
+
+	if (IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_CACHE_L2X0) &&
+	    (machine_desc->l2c_aux_mask || machine_desc->l2c_aux_val)) {
+		outer_cache.write_sec = machine_desc->l2c_write_sec;
+		ret = l2x0_of_init(machine_desc->l2c_aux_val,
+				   machine_desc->l2c_aux_mask);
+		if (ret)
+			pr_err("L2C: failed to init: %d\n", ret);
+	}
 }

 #ifdef CONFIG_MULTI_IRQ_HANDLER
......
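The hunk above has init_IRQ() hand the machine_desc's l2c_aux_val/l2c_aux_mask pair to l2x0_of_init(). A minimal stand-alone sketch of how such a value/mask pair is conventionally folded into the L2 auxiliary control register, assuming the long-standing l2x0 convention that the mask selects which hardware bits survive and the value ORs in the platform's settings (user-space, not kernel code; all register values below are invented):

	#include <stdio.h>

	/* new = (hardware value & aux_mask) | aux_val */
	static unsigned int apply_aux(unsigned int hw, unsigned int val, unsigned int mask)
	{
		return (hw & mask) | val;
	}

	int main(void)
	{
		unsigned int hw   = 0x02050000;	/* made-up power-on AUX_CTRL value */
		unsigned int mask = 0xc0000fff;	/* hardware bits to preserve */
		unsigned int val  = 0x30400000;	/* platform bits to force on */

		printf("aux_ctrl: 0x%08x -> 0x%08x\n", hw, apply_aux(hw, val, mask));
		return 0;
	}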
@@ -18,6 +18,7 @@
 #include <asm/ptrace.h>
 #include <asm/thread_info.h>
 #include <asm/asm-offsets.h>
+#include <asm/assembler.h>

 #if defined(CONFIG_CPU_PJ4) || defined(CONFIG_CPU_PJ4B)
 #define PJ4(code...)		code
@@ -65,17 +66,18 @@
 *  r9  = ret_from_exception
 *  lr  = undefined instr exit
 *
- * called from prefetch exception handler with interrupts disabled
+ * called from prefetch exception handler with interrupts enabled
 */

 ENTRY(iwmmxt_task_enable)
+	inc_preempt_count r10, r3

	XSC(mrc	p15, 0, r2, c15, c1, 0)
	PJ4(mrc p15, 0, r2, c1, c0, 2)
	@ CP0 and CP1 accessible?
	XSC(tst	r2, #0x3)
	PJ4(tst	r2, #0xf)
-	movne	pc, lr				@ if so no business here
+	bne	4f				@ if so no business here
	@ enable access to CP0 and CP1
	XSC(orr	r2, r2, #0x3)
	XSC(mcr	p15, 0, r2, c15, c1, 0)
@@ -136,7 +138,7 @@ concan_dump:
	wstrd	wR15, [r1, #MMX_WR15]

 2:	teq	r0, #0				@ anything to load?
-	moveq	pc, lr
+	beq	3f

 concan_load:
@@ -169,8 +171,14 @@ concan_load:
	@ clear CUP/MUP (only if r1 != 0)
	teq	r1, #0
	mov	r2, #0
-	moveq	pc, lr
+	beq	3f
	tmcr	wCon, r2

+3:
+#ifdef CONFIG_PREEMPT_COUNT
+	get_thread_info r10
+#endif
+4:	dec_preempt_count r10, r3
	mov	pc, lr

 /*
......
@@ -221,6 +221,7 @@ static struct notifier_block cpu_pmu_hotplug_notifier = {
 * PMU platform driver and devicetree bindings.
 */
 static struct of_device_id cpu_pmu_of_device_ids[] = {
+	{.compatible = "arm,cortex-a17-pmu",	.data = armv7_a17_pmu_init},
	{.compatible = "arm,cortex-a15-pmu",	.data = armv7_a15_pmu_init},
	{.compatible = "arm,cortex-a12-pmu",	.data = armv7_a12_pmu_init},
	{.compatible = "arm,cortex-a9-pmu",	.data = armv7_a9_pmu_init},
......
@@ -1599,6 +1599,13 @@ static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
	return 0;
 }

+static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	armv7_a12_pmu_init(cpu_pmu);
+	cpu_pmu->name = "ARMv7 Cortex-A17";
+	return 0;
+}
+
 /*
 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
 *
@@ -2021,6 +2028,11 @@ static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
	return -ENODEV;
 }

+static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
+{
+	return -ENODEV;
+}
+
 static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
 {
	return -ENODEV;
......
@@ -72,6 +72,7 @@ static int __init fpe_setup(char *line)
 __setup("fpe=", fpe_setup);
 #endif

+extern void init_default_cache_policy(unsigned long);
 extern void paging_init(const struct machine_desc *desc);
 extern void early_paging_init(const struct machine_desc *,
			      struct proc_info_list *);
@@ -590,7 +591,7 @@ static void __init setup_processor(void)
	pr_info("CPU: %s [%08x] revision %d (ARMv%s), cr=%08lx\n",
		cpu_name, read_cpuid_id(), read_cpuid_id() & 15,
-		proc_arch[cpu_architecture()], cr_alignment);
+		proc_arch[cpu_architecture()], get_cr());

	snprintf(init_utsname()->machine, __NEW_UTS_LEN + 1, "%s%c",
		 list->arch_name, ENDIANNESS);
@@ -603,7 +604,9 @@ static void __init setup_processor(void)
 #ifndef CONFIG_ARM_THUMB
	elf_hwcap &= ~(HWCAP_THUMB | HWCAP_IDIVT);
 #endif
+#ifdef CONFIG_MMU
+	init_default_cache_policy(list->__cpu_mm_mmu_flags);
+#endif
	erratum_a15_798181_init();

	feat_v6_fixup();
@@ -628,15 +631,8 @@ void __init dump_machine_table(void)

 int __init arm_add_memory(u64 start, u64 size)
 {
-	struct membank *bank = &meminfo.bank[meminfo.nr_banks];
	u64 aligned_start;

-	if (meminfo.nr_banks >= NR_BANKS) {
-		pr_crit("NR_BANKS too low, ignoring memory at 0x%08llx\n",
-			(long long)start);
-		return -EINVAL;
-	}
-
	/*
	 * Ensure that start/size are aligned to a page boundary.
	 * Size is appropriately rounded down, start is rounded up.
@@ -677,17 +673,17 @@ int __init arm_add_memory(u64 start, u64 size)
		aligned_start = PHYS_OFFSET;
	}

-	bank->start = aligned_start;
-	bank->size = size & ~(phys_addr_t)(PAGE_SIZE - 1);
+	start = aligned_start;
+	size = size & ~(phys_addr_t)(PAGE_SIZE - 1);

	/*
	 * Check whether this memory region has non-zero size or
	 * invalid node number.
	 */
-	if (bank->size == 0)
+	if (size == 0)
		return -EINVAL;

-	meminfo.nr_banks++;
+	memblock_add(start, size);
	return 0;
 }

@@ -695,6 +691,7 @@ int __init arm_add_memory(u64 start, u64 size)
 * Pick out the memory size.  We look for mem=size@start,
 * where start and size are "size[KkMm]"
 */
 static int __init early_mem(char *p)
 {
	static int usermem __initdata = 0;
@@ -709,7 +706,8 @@ static int __init early_mem(char *p)
	 */
	if (usermem == 0) {
		usermem = 1;
-		meminfo.nr_banks = 0;
+		memblock_remove(memblock_start_of_DRAM(),
+			memblock_end_of_DRAM() - memblock_start_of_DRAM());
	}

	start = PHYS_OFFSET;
@@ -854,13 +852,6 @@ static void __init reserve_crashkernel(void)
 static inline void reserve_crashkernel(void) {}
 #endif /* CONFIG_KEXEC */

-static int __init meminfo_cmp(const void *_a, const void *_b)
-{
-	const struct membank *a = _a, *b = _b;
-	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
-	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
-}
-
 void __init hyp_mode_check(void)
 {
 #ifdef CONFIG_ARM_VIRT_EXT
@@ -903,12 +894,10 @@ void __init setup_arch(char **cmdline_p)

	parse_early_param();

-	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
-
	early_paging_init(mdesc, lookup_processor_type(read_cpuid_id()));
	setup_dma_zone(mdesc);
	sanity_check_meminfo();
-	arm_memblock_init(&meminfo, mdesc);
+	arm_memblock_init(mdesc);

	paging_init(mdesc);
	request_standard_resources(mdesc);
......
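The setup.c hunks above complete the removal of the fixed-size meminfo bank array: arm_add_memory() now feeds memblock directly, and the first mem= argument wipes whatever DT/ATAGs had registered before re-adding the user-supplied banks. A minimal sketch of that flow, assuming a single hypothetical "mem=64M@0xc0000000" override (the wrapper function below is invented; the memblock calls are the ones used in the hunk):

static int __init example_mem_override(void)
{
        /* first mem= argument seen: forget everything DT/ATAGs registered */
        memblock_remove(memblock_start_of_DRAM(),
                        memblock_end_of_DRAM() - memblock_start_of_DRAM());

        /* then register the user-supplied bank; arm_add_memory() aligns
         * start/size to a page boundary before calling memblock_add() */
        return arm_add_memory(0xc0000000ULL, SZ_64M);
}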
...@@ -127,6 +127,10 @@ ENDPROC(cpu_resume_after_mmu) ...@@ -127,6 +127,10 @@ ENDPROC(cpu_resume_after_mmu)
.align .align
ENTRY(cpu_resume) ENTRY(cpu_resume)
ARM_BE8(setend be) @ ensure we are in BE mode ARM_BE8(setend be) @ ensure we are in BE mode
#ifdef CONFIG_ARM_VIRT_EXT
bl __hyp_stub_install_secondary
#endif
safe_svcmode_maskall r1
mov r1, #0 mov r1, #0
ALT_SMP(mrc p15, 0, r0, c0, c0, 5) ALT_SMP(mrc p15, 0, r0, c0, c0, 5)
ALT_UP_B(1f) ALT_UP_B(1f)
...@@ -144,7 +148,6 @@ ARM_BE8(setend be) @ ensure we are in BE mode ...@@ -144,7 +148,6 @@ ARM_BE8(setend be) @ ensure we are in BE mode
ldr r0, [r0, #SLEEP_SAVE_SP_PHYS] ldr r0, [r0, #SLEEP_SAVE_SP_PHYS]
ldr r0, [r0, r1, lsl #2] ldr r0, [r0, r1, lsl #2]
setmode PSR_I_BIT | PSR_F_BIT | SVC_MODE, r1 @ set SVC, irqs off
@ load phys pgd, stack, resume fn @ load phys pgd, stack, resume fn
ARM( ldmia r0!, {r1, sp, pc} ) ARM( ldmia r0!, {r1, sp, pc} )
THUMB( ldmia r0!, {r1, r2, r3} ) THUMB( ldmia r0!, {r1, r2, r3} )
......
...@@ -3,6 +3,7 @@ ...@@ -3,6 +3,7 @@
#include <linux/stacktrace.h> #include <linux/stacktrace.h>
#include <asm/stacktrace.h> #include <asm/stacktrace.h>
#include <asm/traps.h>
#if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND) #if defined(CONFIG_FRAME_POINTER) && !defined(CONFIG_ARM_UNWIND)
/* /*
...@@ -61,6 +62,7 @@ EXPORT_SYMBOL(walk_stackframe); ...@@ -61,6 +62,7 @@ EXPORT_SYMBOL(walk_stackframe);
#ifdef CONFIG_STACKTRACE #ifdef CONFIG_STACKTRACE
struct stack_trace_data { struct stack_trace_data {
struct stack_trace *trace; struct stack_trace *trace;
unsigned long last_pc;
unsigned int no_sched_functions; unsigned int no_sched_functions;
unsigned int skip; unsigned int skip;
}; };
...@@ -69,6 +71,7 @@ static int save_trace(struct stackframe *frame, void *d) ...@@ -69,6 +71,7 @@ static int save_trace(struct stackframe *frame, void *d)
{ {
struct stack_trace_data *data = d; struct stack_trace_data *data = d;
struct stack_trace *trace = data->trace; struct stack_trace *trace = data->trace;
struct pt_regs *regs;
unsigned long addr = frame->pc; unsigned long addr = frame->pc;
if (data->no_sched_functions && in_sched_functions(addr)) if (data->no_sched_functions && in_sched_functions(addr))
...@@ -80,16 +83,39 @@ static int save_trace(struct stackframe *frame, void *d) ...@@ -80,16 +83,39 @@ static int save_trace(struct stackframe *frame, void *d)
trace->entries[trace->nr_entries++] = addr; trace->entries[trace->nr_entries++] = addr;
if (trace->nr_entries >= trace->max_entries)
return 1;
/*
* in_exception_text() is designed to test if the PC is one of
* the functions which have an exception stack above it, but
* unfortunately what is in frame->pc is the return LR value,
* not the saved PC value. So, we need to track the previous
* frame PC value when doing this.
*/
addr = data->last_pc;
data->last_pc = frame->pc;
if (!in_exception_text(addr))
return 0;
regs = (struct pt_regs *)frame->sp;
trace->entries[trace->nr_entries++] = regs->ARM_pc;
return trace->nr_entries >= trace->max_entries; return trace->nr_entries >= trace->max_entries;
} }
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) /* This must be noinline so that our skip calculation works correctly */
static noinline void __save_stack_trace(struct task_struct *tsk,
struct stack_trace *trace, unsigned int nosched)
{ {
struct stack_trace_data data; struct stack_trace_data data;
struct stackframe frame; struct stackframe frame;
data.trace = trace; data.trace = trace;
data.last_pc = ULONG_MAX;
data.skip = trace->skip; data.skip = trace->skip;
data.no_sched_functions = nosched;
if (tsk != current) { if (tsk != current) {
#ifdef CONFIG_SMP #ifdef CONFIG_SMP
...@@ -102,7 +128,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) ...@@ -102,7 +128,6 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX; trace->entries[trace->nr_entries++] = ULONG_MAX;
return; return;
#else #else
data.no_sched_functions = 1;
frame.fp = thread_saved_fp(tsk); frame.fp = thread_saved_fp(tsk);
frame.sp = thread_saved_sp(tsk); frame.sp = thread_saved_sp(tsk);
frame.lr = 0; /* recovered from the stack */ frame.lr = 0; /* recovered from the stack */
...@@ -111,11 +136,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) ...@@ -111,11 +136,12 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
} else { } else {
register unsigned long current_sp asm ("sp"); register unsigned long current_sp asm ("sp");
data.no_sched_functions = 0; /* We don't want this function nor the caller */
data.skip += 2;
frame.fp = (unsigned long)__builtin_frame_address(0); frame.fp = (unsigned long)__builtin_frame_address(0);
frame.sp = current_sp; frame.sp = current_sp;
frame.lr = (unsigned long)__builtin_return_address(0); frame.lr = (unsigned long)__builtin_return_address(0);
frame.pc = (unsigned long)save_stack_trace_tsk; frame.pc = (unsigned long)__save_stack_trace;
} }
walk_stackframe(&frame, save_trace, &data); walk_stackframe(&frame, save_trace, &data);
...@@ -123,9 +149,33 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace) ...@@ -123,9 +149,33 @@ void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
trace->entries[trace->nr_entries++] = ULONG_MAX; trace->entries[trace->nr_entries++] = ULONG_MAX;
} }
void save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
struct stack_trace_data data;
struct stackframe frame;
data.trace = trace;
data.skip = trace->skip;
data.no_sched_functions = 0;
frame.fp = regs->ARM_fp;
frame.sp = regs->ARM_sp;
frame.lr = regs->ARM_lr;
frame.pc = regs->ARM_pc;
walk_stackframe(&frame, save_trace, &data);
if (trace->nr_entries < trace->max_entries)
trace->entries[trace->nr_entries++] = ULONG_MAX;
}
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
__save_stack_trace(tsk, trace, 1);
}
void save_stack_trace(struct stack_trace *trace) void save_stack_trace(struct stack_trace *trace)
{ {
save_stack_trace_tsk(current, trace); __save_stack_trace(current, trace, 0);
} }
EXPORT_SYMBOL_GPL(save_stack_trace); EXPORT_SYMBOL_GPL(save_stack_trace);
#endif #endif
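The new save_stack_trace_regs() is what allows kprobes to record a backtrace starting from a trapped register set instead of from the current frame, seeding the stackframe walker with ARM_fp/ARM_sp/ARM_lr/ARM_pc from pt_regs. A minimal, hypothetical caller using the generic <linux/stacktrace.h> API might look like this (the function name and depth are made up):

#define EXAMPLE_MAX_DEPTH       16

static void example_dump_trace_from_regs(struct pt_regs *regs)
{
        unsigned long entries[EXAMPLE_MAX_DEPTH];
        struct stack_trace trace = {
                .entries        = entries,
                .max_entries    = EXAMPLE_MAX_DEPTH,
                .nr_entries     = 0,
                .skip           = 0,
        };

        save_stack_trace_regs(regs, &trace);
        print_stack_trace(&trace, 0);
}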
...@@ -91,13 +91,13 @@ static void __init parse_dt_topology(void) ...@@ -91,13 +91,13 @@ static void __init parse_dt_topology(void)
{ {
const struct cpu_efficiency *cpu_eff; const struct cpu_efficiency *cpu_eff;
struct device_node *cn = NULL; struct device_node *cn = NULL;
unsigned long min_capacity = (unsigned long)(-1); unsigned long min_capacity = ULONG_MAX;
unsigned long max_capacity = 0; unsigned long max_capacity = 0;
unsigned long capacity = 0; unsigned long capacity = 0;
int alloc_size, cpu = 0; int cpu = 0;
alloc_size = nr_cpu_ids * sizeof(*__cpu_capacity); __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity),
__cpu_capacity = kzalloc(alloc_size, GFP_NOWAIT); GFP_NOWAIT);
for_each_possible_cpu(cpu) { for_each_possible_cpu(cpu) {
const u32 *rate; const u32 *rate;
......
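The allocator change in parse_dt_topology() is worth noting: kcalloc() zeroes the array just as kzalloc() did, but additionally fails the allocation if nr_cpu_ids * sizeof(*__cpu_capacity) were ever to overflow rather than silently returning a short buffer. Side by side:

        /* before: open-coded multiplication, no overflow check */
        __cpu_capacity = kzalloc(nr_cpu_ids * sizeof(*__cpu_capacity), GFP_NOWAIT);

        /* after: zeroed array allocation with built-in overflow checking */
        __cpu_capacity = kcalloc(nr_cpu_ids, sizeof(*__cpu_capacity), GFP_NOWAIT);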
...@@ -113,6 +113,26 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm, ...@@ -113,6 +113,26 @@ int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe, struct mm_struct *mm,
return 0; return 0;
} }
void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
void *src, unsigned long len)
{
void *xol_page_kaddr = kmap_atomic(page);
void *dst = xol_page_kaddr + (vaddr & ~PAGE_MASK);
preempt_disable();
/* Initialize the slot */
memcpy(dst, src, len);
/* flush caches (dcache/icache) */
flush_uprobe_xol_access(page, vaddr, dst, len);
preempt_enable();
kunmap_atomic(xol_page_kaddr);
}
int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs) int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
{ {
struct uprobe_task *utask = current->utask; struct uprobe_task *utask = current->utask;
......
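arch_uprobe_copy_ixol() overrides the generic weak helper that fills a task's XOL ("execute out of line") slot: the memcpy() is the easy part, but on ARM the freshly written instruction must also be made visible to the instruction cache, which is what flush_uprobe_xol_access() does inside the preemption-disabled window. For comparison, the generic fallback it replaces does little more than the copy - roughly this (a paraphrase of the kernel/events/uprobes.c default, not code from this patch):

void __weak arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
                                  void *src, unsigned long len)
{
        void *kaddr = kmap_atomic(page);

        memcpy(kaddr + (vaddr & ~PAGE_MASK), src, len);
        kunmap_atomic(kaddr);
}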
...@@ -43,19 +43,14 @@ static void __init bcm5301x_init_early(void) ...@@ -43,19 +43,14 @@ static void __init bcm5301x_init_early(void)
"imprecise external abort"); "imprecise external abort");
} }
static void __init bcm5301x_dt_init(void)
{
l2x0_of_init(0, ~0UL);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char __initconst *bcm5301x_dt_compat[] = { static const char __initconst *bcm5301x_dt_compat[] = {
"brcm,bcm4708", "brcm,bcm4708",
NULL, NULL,
}; };
DT_MACHINE_START(BCM5301X, "BCM5301X") DT_MACHINE_START(BCM5301X, "BCM5301X")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.init_early = bcm5301x_init_early, .init_early = bcm5301x_init_early,
.init_machine = bcm5301x_dt_init,
.dt_compat = bcm5301x_dt_compat, .dt_compat = bcm5301x_dt_compat,
MACHINE_END MACHINE_END
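This is the first of many conversions in this merge from an open-coded l2x0_of_init() call in .init_machine to the new .l2c_aux_val/.l2c_aux_mask machine-descriptor fields, leaving the common ARM init code to bring up the L2 cache itself. The pair is applied as a read-modify-write of the PL310 auxiliary control register (aux = (aux & mask) | val), so a value of 0 with a mask of ~0 keeps whatever the boot loader or DT configured. A hypothetical DT-only platform opting in would look like this (the "foo" names are invented; the fields mirror the BCM5301X entry above):

static const char *const foo_dt_compat[] __initconst = {
        "vendor,foo-soc",
        NULL,
};

DT_MACHINE_START(FOO_DT, "Foo SoC (Device Tree)")
        /* leave the firmware/DT-provided PL310 AUX_CTRL value untouched */
        .l2c_aux_val    = 0,
        .l2c_aux_mask   = ~0,
        .dt_compat      = foo_dt_compat,
MACHINE_END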
...@@ -18,16 +18,6 @@ ...@@ -18,16 +18,6 @@
#include <asm/hardware/cache-l2x0.h> #include <asm/hardware/cache-l2x0.h>
#include <asm/mach/arch.h> #include <asm/mach/arch.h>
static void __init berlin_init_machine(void)
{
/*
* with DT probing for L2CCs, berlin_init_machine can be removed.
* Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
*/
l2x0_of_init(0x70c00000, 0xfeffffff);
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char * const berlin_dt_compat[] = { static const char * const berlin_dt_compat[] = {
"marvell,berlin", "marvell,berlin",
NULL, NULL,
...@@ -35,5 +25,10 @@ static const char * const berlin_dt_compat[] = { ...@@ -35,5 +25,10 @@ static const char * const berlin_dt_compat[] = {
DT_MACHINE_START(BERLIN_DT, "Marvell Berlin") DT_MACHINE_START(BERLIN_DT, "Marvell Berlin")
.dt_compat = berlin_dt_compat, .dt_compat = berlin_dt_compat,
.init_machine = berlin_init_machine, /*
* with DT probing for L2CCs, berlin_init_machine can be removed.
* Note: 88DE3005 (Armada 1500-mini) uses pl310 l2cc
*/
.l2c_aux_val = 0x30c00000,
.l2c_aux_mask = 0xfeffffff,
MACHINE_END MACHINE_END
...@@ -18,6 +18,7 @@ ...@@ -18,6 +18,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/types.h> #include <linux/types.h>
#include <linux/string.h> #include <linux/string.h>
#include <linux/memblock.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
...@@ -26,11 +27,9 @@ ...@@ -26,11 +27,9 @@
#include "common.h" #include "common.h"
static void __init static void __init
fixup_clep7312(struct tag *tags, char **cmdline, struct meminfo *mi) fixup_clep7312(struct tag *tags, char **cmdline)
{ {
mi->nr_banks=1; memblock_add(0xc0000000, 0x01000000);
mi->bank[0].start = 0xc0000000;
mi->bank[0].size = 0x01000000;
} }
MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312") MACHINE_START(CLEP7212, "Cirrus Logic 7212/7312")
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/interrupt.h> #include <linux/interrupt.h>
#include <linux/backlight.h> #include <linux/backlight.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/memblock.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
#include <linux/mtd/partitions.h> #include <linux/mtd/partitions.h>
...@@ -133,7 +134,7 @@ static void __init edb7211_reserve(void) ...@@ -133,7 +134,7 @@ static void __init edb7211_reserve(void)
} }
static void __init static void __init
fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi) fixup_edb7211(struct tag *tags, char **cmdline)
{ {
/* /*
* Bank start addresses are not present in the information * Bank start addresses are not present in the information
...@@ -143,11 +144,8 @@ fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi) ...@@ -143,11 +144,8 @@ fixup_edb7211(struct tag *tags, char **cmdline, struct meminfo *mi)
* Bank sizes _are_ present in the param block, but we're * Bank sizes _are_ present in the param block, but we're
* not using that information yet. * not using that information yet.
*/ */
mi->bank[0].start = 0xc0000000; memblock_add(0xc0000000, SZ_8M);
mi->bank[0].size = SZ_8M; memblock_add(0xc1000000, SZ_8M);
mi->bank[1].start = 0xc1000000;
mi->bank[1].size = SZ_8M;
mi->nr_banks = 2;
} }
static void __init edb7211_init(void) static void __init edb7211_init(void)
......
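All of the board .fixup hooks later in this commit follow the same pattern as fixup_edb7211() above: the struct meminfo * parameter disappears from the callback, and any RAM the board has to describe by hand is registered straight into memblock, one memblock_add(base, size) call per physical bank. As a generic template (the names below are illustrative, not from the patch; the hook is still wired up through the unchanged .fixup field of MACHINE_START):

static void __init example_board_fixup(struct tag *tags, char **cmdline)
{
        /* one call per physical bank, sizes in bytes */
        memblock_add(0xc0000000, SZ_8M);
        memblock_add(0xc1000000, SZ_8M);
}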
...@@ -295,7 +295,7 @@ static struct generic_bl_info p720t_lcd_backlight_pdata = { ...@@ -295,7 +295,7 @@ static struct generic_bl_info p720t_lcd_backlight_pdata = {
}; };
static void __init static void __init
fixup_p720t(struct tag *tag, char **cmdline, struct meminfo *mi) fixup_p720t(struct tag *tag, char **cmdline)
{ {
/* /*
* Our bootloader doesn't setup any tags (yet). * Our bootloader doesn't setup any tags (yet).
......
...@@ -272,9 +272,9 @@ void __init cns3xxx_l2x0_init(void) ...@@ -272,9 +272,9 @@ void __init cns3xxx_l2x0_init(void)
* *
* 1 cycle of latency for setup, read and write accesses * 1 cycle of latency for setup, read and write accesses
*/ */
val = readl(base + L2X0_TAG_LATENCY_CTRL); val = readl(base + L310_TAG_LATENCY_CTRL);
val &= 0xfffff888; val &= 0xfffff888;
writel(val, base + L2X0_TAG_LATENCY_CTRL); writel(val, base + L310_TAG_LATENCY_CTRL);
/* /*
* Data RAM Control register * Data RAM Control register
...@@ -285,12 +285,12 @@ void __init cns3xxx_l2x0_init(void) ...@@ -285,12 +285,12 @@ void __init cns3xxx_l2x0_init(void)
* *
* 1 cycle of latency for setup, read and write accesses * 1 cycle of latency for setup, read and write accesses
*/ */
val = readl(base + L2X0_DATA_LATENCY_CTRL); val = readl(base + L310_DATA_LATENCY_CTRL);
val &= 0xfffff888; val &= 0xfffff888;
writel(val, base + L2X0_DATA_LATENCY_CTRL); writel(val, base + L310_DATA_LATENCY_CTRL);
/* 32 KiB, 8-way, parity disable */ /* 32 KiB, 8-way, parity disable */
l2x0_init(base, 0x00540000, 0xfe000fff); l2x0_init(base, 0x00500000, 0xfe0f0fff);
} }
#endif /* CONFIG_CACHE_L2X0 */ #endif /* CONFIG_CACHE_L2X0 */
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <asm/ptrace.h> #include <asm/ptrace.h>
#include <asm/thread_info.h> #include <asm/thread_info.h>
#include <asm/asm-offsets.h> #include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <mach/ep93xx-regs.h> #include <mach/ep93xx-regs.h>
/* /*
...@@ -62,14 +63,16 @@ ...@@ -62,14 +63,16 @@
* r9 = ret_from_exception * r9 = ret_from_exception
* lr = undefined instr exit * lr = undefined instr exit
* *
* called from prefetch exception handler with interrupts disabled * called from prefetch exception handler with interrupts enabled
*/ */
ENTRY(crunch_task_enable) ENTRY(crunch_task_enable)
inc_preempt_count r10, r3
ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr ldr r8, =(EP93XX_APB_VIRT_BASE + 0x00130000) @ syscon addr
ldr r1, [r8, #0x80] ldr r1, [r8, #0x80]
tst r1, #0x00800000 @ access to crunch enabled? tst r1, #0x00800000 @ access to crunch enabled?
movne pc, lr @ if so no business here bne 2f @ if so no business here
mov r3, #0xaa @ unlock syscon swlock mov r3, #0xaa @ unlock syscon swlock
str r3, [r8, #0xc0] str r3, [r8, #0xc0]
orr r1, r1, #0x00800000 @ enable access to crunch orr r1, r1, #0x00800000 @ enable access to crunch
...@@ -142,7 +145,7 @@ crunch_save: ...@@ -142,7 +145,7 @@ crunch_save:
teq r0, #0 @ anything to load? teq r0, #0 @ anything to load?
cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered cfldr64eq mvdx0, [r1, #CRUNCH_MVDX0] @ mvdx0 was clobbered
moveq pc, lr beq 1f
crunch_load: crunch_load:
cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word cfldr64 mvdx0, [r0, #CRUNCH_DSPSC] @ load status word
...@@ -190,6 +193,11 @@ crunch_load: ...@@ -190,6 +193,11 @@ crunch_load:
cfldr64 mvdx14, [r0, #CRUNCH_MVDX14] cfldr64 mvdx14, [r0, #CRUNCH_MVDX14]
cfldr64 mvdx15, [r0, #CRUNCH_MVDX15] cfldr64 mvdx15, [r0, #CRUNCH_MVDX15]
1:
#ifdef CONFIG_PREEMPT_COUNT
get_thread_info r10
#endif
2: dec_preempt_count r10, r3
mov pc, lr mov pc, lr
/* /*
......
...@@ -153,7 +153,6 @@ enum sys_powerdown { ...@@ -153,7 +153,6 @@ enum sys_powerdown {
NUM_SYS_POWERDOWN, NUM_SYS_POWERDOWN,
}; };
extern unsigned long l2x0_regs_phys;
struct exynos_pmu_conf { struct exynos_pmu_conf {
void __iomem *reg; void __iomem *reg;
unsigned int val[NUM_SYS_POWERDOWN]; unsigned int val[NUM_SYS_POWERDOWN];
......
...@@ -30,9 +30,6 @@ ...@@ -30,9 +30,6 @@
#include "mfc.h" #include "mfc.h"
#include "regs-pmu.h" #include "regs-pmu.h"
#define L2_AUX_VAL 0x7C470001
#define L2_AUX_MASK 0xC200ffff
static struct map_desc exynos4_iodesc[] __initdata = { static struct map_desc exynos4_iodesc[] __initdata = {
{ {
.virtual = (unsigned long)S3C_VA_SYS, .virtual = (unsigned long)S3C_VA_SYS,
...@@ -246,25 +243,6 @@ void __init exynos_init_io(void) ...@@ -246,25 +243,6 @@ void __init exynos_init_io(void)
exynos_map_io(); exynos_map_io();
} }
static int __init exynos4_l2x0_cache_init(void)
{
int ret;
if (!soc_is_exynos4())
return 0;
ret = l2x0_of_init(L2_AUX_VAL, L2_AUX_MASK);
if (ret)
return ret;
if (IS_ENABLED(CONFIG_S5P_SLEEP)) {
l2x0_regs_phys = virt_to_phys(&l2x0_saved_regs);
clean_dcache_area(&l2x0_regs_phys, sizeof(unsigned long));
}
return 0;
}
early_initcall(exynos4_l2x0_cache_init);
static void __init exynos_dt_machine_init(void) static void __init exynos_dt_machine_init(void)
{ {
struct device_node *i2c_np; struct device_node *i2c_np;
...@@ -333,6 +311,8 @@ static void __init exynos_reserve(void) ...@@ -333,6 +311,8 @@ static void __init exynos_reserve(void)
DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)") DT_MACHINE_START(EXYNOS_DT, "SAMSUNG EXYNOS (Flattened Device Tree)")
/* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */ /* Maintainer: Thomas Abraham <thomas.abraham@linaro.org> */
/* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */ /* Maintainer: Kukjin Kim <kgene.kim@samsung.com> */
.l2c_aux_val = 0x3c400001,
.l2c_aux_mask = 0xc20fffff,
.smp = smp_ops(exynos_smp_ops), .smp = smp_ops(exynos_smp_ops),
.map_io = exynos_init_io, .map_io = exynos_init_io,
.init_early = exynos_firmware_init, .init_early = exynos_firmware_init,
......
...@@ -16,8 +16,6 @@ ...@@ -16,8 +16,6 @@
*/ */
#include <linux/linkage.h> #include <linux/linkage.h>
#include <asm/asm-offsets.h>
#include <asm/hardware/cache-l2x0.h>
#define CPU_MASK 0xff0ffff0 #define CPU_MASK 0xff0ffff0
#define CPU_CORTEX_A9 0x410fc090 #define CPU_CORTEX_A9 0x410fc090
...@@ -53,33 +51,7 @@ ENTRY(exynos_cpu_resume) ...@@ -53,33 +51,7 @@ ENTRY(exynos_cpu_resume)
and r0, r0, r1 and r0, r0, r1
ldr r1, =CPU_CORTEX_A9 ldr r1, =CPU_CORTEX_A9
cmp r0, r1 cmp r0, r1
bne skip_l2_resume bleq l2c310_early_resume
adr r0, l2x0_regs_phys
ldr r0, [r0]
cmp r0, #0
beq skip_l2_resume
ldr r1, [r0, #L2X0_R_PHY_BASE]
ldr r2, [r1, #L2X0_CTRL]
tst r2, #0x1
bne skip_l2_resume
ldr r2, [r0, #L2X0_R_AUX_CTRL]
str r2, [r1, #L2X0_AUX_CTRL]
ldr r2, [r0, #L2X0_R_TAG_LATENCY]
str r2, [r1, #L2X0_TAG_LATENCY_CTRL]
ldr r2, [r0, #L2X0_R_DATA_LATENCY]
str r2, [r1, #L2X0_DATA_LATENCY_CTRL]
ldr r2, [r0, #L2X0_R_PREFETCH_CTRL]
str r2, [r1, #L2X0_PREFETCH_CTRL]
ldr r2, [r0, #L2X0_R_PWR_CTRL]
str r2, [r1, #L2X0_POWER_CTRL]
mov r2, #1
str r2, [r1, #L2X0_CTRL]
skip_l2_resume:
#endif #endif
b cpu_resume b cpu_resume
ENDPROC(exynos_cpu_resume) ENDPROC(exynos_cpu_resume)
#ifdef CONFIG_CACHE_L2X0
.globl l2x0_regs_phys
l2x0_regs_phys:
.long 0
#endif
...@@ -76,7 +76,7 @@ __initcall(cats_hw_init); ...@@ -76,7 +76,7 @@ __initcall(cats_hw_init);
* hard reboots fail on early boards. * hard reboots fail on early boards.
*/ */
static void __init static void __init
fixup_cats(struct tag *tags, char **cmdline, struct meminfo *mi) fixup_cats(struct tag *tags, char **cmdline)
{ {
#if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE) #if defined(CONFIG_VGA_CONSOLE) || defined(CONFIG_DUMMY_CONSOLE)
screen_info.orig_video_lines = 25; screen_info.orig_video_lines = 25;
......
...@@ -620,7 +620,7 @@ __initcall(nw_hw_init); ...@@ -620,7 +620,7 @@ __initcall(nw_hw_init);
* the parameter page. * the parameter page.
*/ */
static void __init static void __init
fixup_netwinder(struct tag *tags, char **cmdline, struct meminfo *mi) fixup_netwinder(struct tag *tags, char **cmdline)
{ {
#ifdef CONFIG_ISAPNP #ifdef CONFIG_ISAPNP
extern int isapnp_disable; extern int isapnp_disable;
......
...@@ -51,11 +51,13 @@ static void __init highbank_scu_map_io(void) ...@@ -51,11 +51,13 @@ static void __init highbank_scu_map_io(void)
} }
static void highbank_l2x0_disable(void) static void highbank_l2c310_write_sec(unsigned long val, unsigned reg)
{ {
outer_flush_all(); if (reg == L2X0_CTRL)
/* Disable PL310 L2 Cache controller */ highbank_smc1(0x102, val);
highbank_smc1(0x102, 0x0); else
WARN_ONCE(1, "Highbank L2C310: ignoring write to reg 0x%x\n",
reg);
} }
static void __init highbank_init_irq(void) static void __init highbank_init_irq(void)
...@@ -64,14 +66,6 @@ static void __init highbank_init_irq(void) ...@@ -64,14 +66,6 @@ static void __init highbank_init_irq(void)
if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9")) if (of_find_compatible_node(NULL, NULL, "arm,cortex-a9"))
highbank_scu_map_io(); highbank_scu_map_io();
/* Enable PL310 L2 Cache controller */
if (IS_ENABLED(CONFIG_CACHE_L2X0) &&
of_find_compatible_node(NULL, NULL, "arm,pl310-cache")) {
highbank_smc1(0x102, 0x1);
l2x0_of_init(0, ~0UL);
outer_cache.disable = highbank_l2x0_disable;
}
} }
static void highbank_power_off(void) static void highbank_power_off(void)
...@@ -185,6 +179,9 @@ DT_MACHINE_START(HIGHBANK, "Highbank") ...@@ -185,6 +179,9 @@ DT_MACHINE_START(HIGHBANK, "Highbank")
#if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE) #if defined(CONFIG_ZONE_DMA) && defined(CONFIG_ARM_LPAE)
.dma_zone_size = (4ULL * SZ_1G), .dma_zone_size = (4ULL * SZ_1G),
#endif #endif
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.l2c_write_sec = highbank_l2c310_write_sec,
.init_irq = highbank_init_irq, .init_irq = highbank_init_irq,
.init_machine = highbank_init, .init_machine = highbank_init,
.dt_compat = highbank_match, .dt_compat = highbank_match,
......
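highbank_l2c310_write_sec() replaces the old trick of overriding outer_cache.disable: platforms whose PL310 control registers are writable only from the secure world now provide a single write hook (here through the new .l2c_write_sec machine-descriptor field), and the common L2C-310 code decides when such writes are needed. A minimal hook for a similar firmware interface could look like the sketch below (my_firmware_smc() is invented; 0x102 is simply the command Highbank's monitor uses above):

static void example_l2c310_write_sec(unsigned long val, unsigned reg)
{
        switch (reg) {
        case L2X0_CTRL:
                my_firmware_smc(0x102, val);    /* enable/disable the cache */
                break;
        default:
                WARN_ONCE(1, "L2C310: ignoring write to reg 0x%x\n", reg);
                break;
        }
}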
...@@ -20,19 +20,14 @@ static void __init vf610_init_machine(void) ...@@ -20,19 +20,14 @@ static void __init vf610_init_machine(void)
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
} }
static void __init vf610_init_irq(void)
{
l2x0_of_init(0, ~0UL);
irqchip_init();
}
static const char *vf610_dt_compat[] __initconst = { static const char *vf610_dt_compat[] __initconst = {
"fsl,vf610", "fsl,vf610",
NULL, NULL,
}; };
DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF610 (Device Tree)") DT_MACHINE_START(VYBRID_VF610, "Freescale Vybrid VF610 (Device Tree)")
.init_irq = vf610_init_irq, .l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.init_machine = vf610_init_machine, .init_machine = vf610_init_machine,
.dt_compat = vf610_dt_compat, .dt_compat = vf610_dt_compat,
.restart = mxc_restart, .restart = mxc_restart,
......
...@@ -334,28 +334,10 @@ ENDPROC(imx6_suspend) ...@@ -334,28 +334,10 @@ ENDPROC(imx6_suspend)
* turned into relative ones. * turned into relative ones.
*/ */
#ifdef CONFIG_CACHE_L2X0
.macro pl310_resume
adr r0, l2x0_saved_regs_offset
ldr r2, [r0]
add r2, r2, r0
ldr r0, [r2, #L2X0_R_PHY_BASE] @ get physical base of l2x0
ldr r1, [r2, #L2X0_R_AUX_CTRL] @ get aux_ctrl value
str r1, [r0, #L2X0_AUX_CTRL] @ restore aux_ctrl
mov r1, #0x1
str r1, [r0, #L2X0_CTRL] @ re-enable L2
.endm
l2x0_saved_regs_offset:
.word l2x0_saved_regs - .
#else
.macro pl310_resume
.endm
#endif
ENTRY(v7_cpu_resume) ENTRY(v7_cpu_resume)
bl v7_invalidate_l1 bl v7_invalidate_l1
pl310_resume #ifdef CONFIG_CACHE_L2X0
bl l2c310_early_resume
#endif
b cpu_resume b cpu_resume
ENDPROC(v7_cpu_resume) ENDPROC(v7_cpu_resume)
...@@ -124,7 +124,7 @@ void __init imx_init_l2cache(void) ...@@ -124,7 +124,7 @@ void __init imx_init_l2cache(void)
} }
/* Configure the L2 PREFETCH and POWER registers */ /* Configure the L2 PREFETCH and POWER registers */
val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL); val = readl_relaxed(l2x0_base + L310_PREFETCH_CTRL);
val |= 0x70800000; val |= 0x70800000;
/* /*
* The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0 * The L2 cache controller(PL310) version on the i.MX6D/Q is r3p1-50rel0
...@@ -137,14 +137,12 @@ void __init imx_init_l2cache(void) ...@@ -137,14 +137,12 @@ void __init imx_init_l2cache(void)
*/ */
if (cpu_is_imx6q()) if (cpu_is_imx6q())
val &= ~(1 << 30 | 1 << 23); val &= ~(1 << 30 | 1 << 23);
writel_relaxed(val, l2x0_base + L2X0_PREFETCH_CTRL); writel_relaxed(val, l2x0_base + L310_PREFETCH_CTRL);
val = L2X0_DYNAMIC_CLK_GATING_EN | L2X0_STNDBY_MODE_EN;
writel_relaxed(val, l2x0_base + L2X0_POWER_CTRL);
iounmap(l2x0_base); iounmap(l2x0_base);
of_node_put(np); of_node_put(np);
out: out:
l2x0_of_init(0, ~0UL); l2x0_of_init(0, ~0);
} }
#endif #endif
...@@ -83,11 +83,6 @@ static void __init halibut_init(void) ...@@ -83,11 +83,6 @@ static void __init halibut_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices)); platform_add_devices(devices, ARRAY_SIZE(devices));
} }
static void __init halibut_fixup(struct tag *tags, char **cmdline,
struct meminfo *mi)
{
}
static void __init halibut_map_io(void) static void __init halibut_map_io(void)
{ {
msm_map_common_io(); msm_map_common_io();
...@@ -100,7 +95,6 @@ static void __init halibut_init_late(void) ...@@ -100,7 +95,6 @@ static void __init halibut_init_late(void)
MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)") MACHINE_START(HALIBUT, "Halibut Board (QCT SURF7200A)")
.atag_offset = 0x100, .atag_offset = 0x100,
.fixup = halibut_fixup,
.map_io = halibut_map_io, .map_io = halibut_map_io,
.init_early = halibut_init_early, .init_early = halibut_init_early,
.init_irq = halibut_init_irq, .init_irq = halibut_init_irq,
......
...@@ -22,6 +22,7 @@ ...@@ -22,6 +22,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/memblock.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
#include <asm/mach/arch.h> #include <asm/mach/arch.h>
...@@ -52,16 +53,10 @@ static void __init mahimahi_init(void) ...@@ -52,16 +53,10 @@ static void __init mahimahi_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices)); platform_add_devices(devices, ARRAY_SIZE(devices));
} }
static void __init mahimahi_fixup(struct tag *tags, char **cmdline, static void __init mahimahi_fixup(struct tag *tags, char **cmdline)
struct meminfo *mi)
{ {
mi->nr_banks = 2; memblock_add(PHYS_OFFSET, 219*SZ_1M);
mi->bank[0].start = PHYS_OFFSET; memblock_add(MSM_HIGHMEM_BASE, MSM_HIGHMEM_SIZE);
mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
mi->bank[0].size = (219*1024*1024);
mi->bank[1].start = MSM_HIGHMEM_BASE;
mi->bank[1].node = PHYS_TO_NID(MSM_HIGHMEM_BASE);
mi->bank[1].size = MSM_HIGHMEM_SIZE;
} }
static void __init mahimahi_map_io(void) static void __init mahimahi_map_io(void)
......
...@@ -40,8 +40,7 @@ ...@@ -40,8 +40,7 @@
#include "proc_comm.h" #include "proc_comm.h"
#include "common.h" #include "common.h"
static void __init msm7x30_fixup(struct tag *tag, char **cmdline, static void __init msm7x30_fixup(struct tag *tag, char **cmdline)
struct meminfo *mi)
{ {
for (; tag->hdr.size; tag = tag_next(tag)) for (; tag->hdr.size; tag = tag_next(tag))
if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) { if (tag->hdr.tag == ATAG_MEM && tag->u.mem.start == 0x200000) {
......
...@@ -35,6 +35,7 @@ ...@@ -35,6 +35,7 @@
#include <linux/mtd/nand.h> #include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h> #include <linux/mtd/partitions.h>
#include <linux/memblock.h>
#include "gpio_chip.h" #include "gpio_chip.h"
#include "board-sapphire.h" #include "board-sapphire.h"
...@@ -74,22 +75,18 @@ static struct map_desc sapphire_io_desc[] __initdata = { ...@@ -74,22 +75,18 @@ static struct map_desc sapphire_io_desc[] __initdata = {
} }
}; };
static void __init sapphire_fixup(struct tag *tags, char **cmdline, static void __init sapphire_fixup(struct tag *tags, char **cmdline)
struct meminfo *mi)
{ {
int smi_sz = parse_tag_smi((const struct tag *)tags); int smi_sz = parse_tag_smi((const struct tag *)tags);
mi->nr_banks = 1;
mi->bank[0].start = PHYS_OFFSET;
mi->bank[0].node = PHYS_TO_NID(PHYS_OFFSET);
if (smi_sz == 32) { if (smi_sz == 32) {
mi->bank[0].size = (84*1024*1024); memblock_add(PHYS_OFFSET, 84*SZ_1M);
} else if (smi_sz == 64) { } else if (smi_sz == 64) {
mi->bank[0].size = (101*1024*1024); memblock_add(PHYS_OFFSET, 101*SZ_1M);
} else { } else {
memblock_add(PHYS_OFFSET, 101*SZ_1M);
/* Give a default value when the SMI size is not available */ /* Give a default value when the SMI size is not available */
smi_sz = 64; smi_sz = 64;
mi->bank[0].size = (101*1024*1024);
} }
} }
......
...@@ -19,6 +19,7 @@ ...@@ -19,6 +19,7 @@
#include <linux/init.h> #include <linux/init.h>
#include <linux/platform_device.h> #include <linux/platform_device.h>
#include <linux/clkdev.h> #include <linux/clkdev.h>
#include <linux/memblock.h>
#include <asm/system_info.h> #include <asm/system_info.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
...@@ -55,12 +56,9 @@ static void __init trout_init_irq(void) ...@@ -55,12 +56,9 @@ static void __init trout_init_irq(void)
msm_init_irq(); msm_init_irq();
} }
static void __init trout_fixup(struct tag *tags, char **cmdline, static void __init trout_fixup(struct tag *tags, char **cmdline)
struct meminfo *mi)
{ {
mi->nr_banks = 1; memblock_add(PHYS_OFFSET, 101*SZ_1M);
mi->bank[0].start = PHYS_OFFSET;
mi->bank[0].size = (101*1024*1024);
} }
static void __init trout_init(void) static void __init trout_init(void)
......
...@@ -182,6 +182,8 @@ static const char * const armada_370_xp_dt_compat[] = { ...@@ -182,6 +182,8 @@ static const char * const armada_370_xp_dt_compat[] = {
}; };
DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)") DT_MACHINE_START(ARMADA_370_XP_DT, "Marvell Armada 370/XP (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.smp = smp_ops(armada_xp_smp_ops), .smp = smp_ops(armada_xp_smp_ops),
.init_machine = mvebu_dt_init, .init_machine = mvebu_dt_init,
.init_time = mvebu_timer_and_clk_init, .init_time = mvebu_timer_and_clk_init,
...@@ -195,6 +197,8 @@ static const char * const armada_375_dt_compat[] = { ...@@ -195,6 +197,8 @@ static const char * const armada_375_dt_compat[] = {
}; };
DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)") DT_MACHINE_START(ARMADA_375_DT, "Marvell Armada 375 (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.init_time = mvebu_timer_and_clk_init, .init_time = mvebu_timer_and_clk_init,
.init_machine = mvebu_dt_init, .init_machine = mvebu_dt_init,
.restart = mvebu_restart, .restart = mvebu_restart,
...@@ -208,6 +212,8 @@ static const char * const armada_38x_dt_compat[] = { ...@@ -208,6 +212,8 @@ static const char * const armada_38x_dt_compat[] = {
}; };
DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)") DT_MACHINE_START(ARMADA_38X_DT, "Marvell Armada 380/385 (Device Tree)")
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.init_time = mvebu_timer_and_clk_init, .init_time = mvebu_timer_and_clk_init,
.restart = mvebu_restart, .restart = mvebu_restart,
.dt_compat = armada_38x_dt_compat, .dt_compat = armada_38x_dt_compat,
......
...@@ -143,23 +143,16 @@ static int __init cpu8815_mmcsd_init(void) ...@@ -143,23 +143,16 @@ static int __init cpu8815_mmcsd_init(void)
} }
device_initcall(cpu8815_mmcsd_init); device_initcall(cpu8815_mmcsd_init);
static void __init cpu8815_init_of(void)
{
#ifdef CONFIG_CACHE_L2X0
/* At full speed latency must be >=2, so 0x249 in low bits */
l2x0_of_init(0x00730249, 0xfe000fff);
#endif
of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
}
static const char * cpu8815_board_compat[] = { static const char * cpu8815_board_compat[] = {
"calaosystems,usb-s8815", "calaosystems,usb-s8815",
NULL, NULL,
}; };
DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815") DT_MACHINE_START(NOMADIK_DT, "Nomadik STn8815")
/* At full speed latency must be >=2, so 0x249 in low bits */
.l2c_aux_val = 0x00700249,
.l2c_aux_mask = 0xfe0fefff,
.map_io = cpu8815_map_io, .map_io = cpu8815_map_io,
.init_machine = cpu8815_init_of,
.restart = cpu8815_restart, .restart = cpu8815_restart,
.dt_compat = cpu8815_board_compat, .dt_compat = cpu8815_board_compat,
MACHINE_END MACHINE_END
...@@ -65,6 +65,7 @@ config SOC_AM43XX ...@@ -65,6 +65,7 @@ config SOC_AM43XX
select ARCH_HAS_OPP select ARCH_HAS_OPP
select ARM_GIC select ARM_GIC
select MACH_OMAP_GENERIC select MACH_OMAP_GENERIC
select MIGHT_HAVE_CACHE_L2X0
config SOC_DRA7XX config SOC_DRA7XX
bool "TI DRA7XX" bool "TI DRA7XX"
......
...@@ -91,6 +91,7 @@ extern void omap3_sync32k_timer_init(void); ...@@ -91,6 +91,7 @@ extern void omap3_sync32k_timer_init(void);
extern void omap3_secure_sync32k_timer_init(void); extern void omap3_secure_sync32k_timer_init(void);
extern void omap3_gptimer_timer_init(void); extern void omap3_gptimer_timer_init(void);
extern void omap4_local_timer_init(void); extern void omap4_local_timer_init(void);
int omap_l2_cache_init(void);
extern void omap5_realtime_timer_init(void); extern void omap5_realtime_timer_init(void);
void omap2420_init_early(void); void omap2420_init_early(void);
......
...@@ -609,6 +609,7 @@ void __init am43xx_init_early(void) ...@@ -609,6 +609,7 @@ void __init am43xx_init_early(void)
am43xx_clockdomains_init(); am43xx_clockdomains_init();
am43xx_hwmod_init(); am43xx_hwmod_init();
omap_hwmod_init_postsetup(); omap_hwmod_init_postsetup();
omap_l2_cache_init();
omap_clk_soc_init = am43xx_dt_clk_init; omap_clk_soc_init = am43xx_dt_clk_init;
} }
...@@ -640,6 +641,7 @@ void __init omap4430_init_early(void) ...@@ -640,6 +641,7 @@ void __init omap4430_init_early(void)
omap44xx_clockdomains_init(); omap44xx_clockdomains_init();
omap44xx_hwmod_init(); omap44xx_hwmod_init();
omap_hwmod_init_postsetup(); omap_hwmod_init_postsetup();
omap_l2_cache_init();
omap_clk_soc_init = omap4xxx_dt_clk_init; omap_clk_soc_init = omap4xxx_dt_clk_init;
} }
......
...@@ -187,19 +187,15 @@ static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state) ...@@ -187,19 +187,15 @@ static void l2x0_pwrst_prepare(unsigned int cpu_id, unsigned int save_state)
* in every restore MPUSS OFF path. * in every restore MPUSS OFF path.
*/ */
#ifdef CONFIG_CACHE_L2X0 #ifdef CONFIG_CACHE_L2X0
static void save_l2x0_context(void) static void __init save_l2x0_context(void)
{ {
u32 val; writel_relaxed(l2x0_saved_regs.aux_ctrl,
void __iomem *l2x0_base = omap4_get_l2cache_base(); sar_base + L2X0_AUXCTRL_OFFSET);
if (l2x0_base) { writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
val = readl_relaxed(l2x0_base + L2X0_AUX_CTRL); sar_base + L2X0_PREFETCH_CTRL_OFFSET);
writel_relaxed(val, sar_base + L2X0_AUXCTRL_OFFSET);
val = readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
writel_relaxed(val, sar_base + L2X0_PREFETCH_CTRL_OFFSET);
}
} }
#else #else
static void save_l2x0_context(void) static void __init save_l2x0_context(void)
{} {}
#endif #endif
......
...@@ -167,75 +167,57 @@ void __iomem *omap4_get_l2cache_base(void) ...@@ -167,75 +167,57 @@ void __iomem *omap4_get_l2cache_base(void)
return l2cache_base; return l2cache_base;
} }
static void omap4_l2x0_disable(void) static void omap4_l2c310_write_sec(unsigned long val, unsigned reg)
{ {
outer_flush_all(); unsigned smc_op;
/* Disable PL310 L2 Cache controller */
omap_smc1(0x102, 0x0);
}
static void omap4_l2x0_set_debug(unsigned long val) switch (reg) {
{ case L2X0_CTRL:
/* Program PL310 L2 Cache controller debug register */ smc_op = OMAP4_MON_L2X0_CTRL_INDEX;
omap_smc1(0x100, val); break;
case L2X0_AUX_CTRL:
smc_op = OMAP4_MON_L2X0_AUXCTRL_INDEX;
break;
case L2X0_DEBUG_CTRL:
smc_op = OMAP4_MON_L2X0_DBG_CTRL_INDEX;
break;
case L310_PREFETCH_CTRL:
smc_op = OMAP4_MON_L2X0_PREFETCH_INDEX;
break;
default:
WARN_ONCE(1, "OMAP L2C310: ignoring write to reg 0x%x\n", reg);
return;
}
omap_smc1(smc_op, val);
} }
static int __init omap_l2_cache_init(void) int __init omap_l2_cache_init(void)
{ {
u32 aux_ctrl = 0; u32 aux_ctrl;
/*
* To avoid code running on other OMAPs in
* multi-omap builds
*/
if (!cpu_is_omap44xx())
return -ENODEV;
/* Static mapping, never released */ /* Static mapping, never released */
l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K); l2cache_base = ioremap(OMAP44XX_L2CACHE_BASE, SZ_4K);
if (WARN_ON(!l2cache_base)) if (WARN_ON(!l2cache_base))
return -ENOMEM; return -ENOMEM;
/* /* 16-way associativity, parity disabled, way size - 64KB (es2.0 +) */
* 16-way associativity, parity disabled aux_ctrl = L2C_AUX_CTRL_SHARED_OVERRIDE |
* Way size - 32KB (es1.0) L310_AUX_CTRL_DATA_PREFETCH |
* Way size - 64KB (es2.0 +) L310_AUX_CTRL_INSTR_PREFETCH;
*/
aux_ctrl = ((1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT) |
(0x1 << 25) |
(0x1 << L2X0_AUX_CTRL_NS_LOCKDOWN_SHIFT) |
(0x1 << L2X0_AUX_CTRL_NS_INT_CTRL_SHIFT));
if (omap_rev() == OMAP4430_REV_ES1_0) {
aux_ctrl |= 0x2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT;
} else {
aux_ctrl |= ((0x3 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
(1 << L2X0_AUX_CTRL_SHARE_OVERRIDE_SHIFT) |
(1 << L2X0_AUX_CTRL_DATA_PREFETCH_SHIFT) |
(1 << L2X0_AUX_CTRL_INSTR_PREFETCH_SHIFT) |
(1 << L2X0_AUX_CTRL_EARLY_BRESP_SHIFT));
}
if (omap_rev() != OMAP4430_REV_ES1_0)
omap_smc1(0x109, aux_ctrl);
/* Enable PL310 L2 Cache controller */
omap_smc1(0x102, 0x1);
outer_cache.write_sec = omap4_l2c310_write_sec;
if (of_have_populated_dt()) if (of_have_populated_dt())
l2x0_of_init(aux_ctrl, L2X0_AUX_CTRL_MASK); l2x0_of_init(aux_ctrl, 0xcf9fffff);
else else
l2x0_init(l2cache_base, aux_ctrl, L2X0_AUX_CTRL_MASK); l2x0_init(l2cache_base, aux_ctrl, 0xcf9fffff);
/*
* Override default outer_cache.disable with a OMAP4
* specific one
*/
outer_cache.disable = omap4_l2x0_disable;
outer_cache.set_debug = omap4_l2x0_set_debug;
return 0; return 0;
} }
omap_early_initcall(omap_l2_cache_init);
#endif #endif
void __iomem *omap4_get_sar_ram_base(void) void __iomem *omap4_get_sar_ram_base(void)
......
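On OMAP the same hook is installed directly as outer_cache.write_sec, and omap4_l2c310_write_sec() maps each register to the matching secure-monitor index (CTRL, AUX_CTRL, DEBUG_CTRL, PREFETCH_CTRL) as the switch above shows. Conceptually the core side behaves roughly like the sketch below when it has to touch one of these registers - this is an assumption about the common helper in cache-l2x0.c, not code from this hunk:

static void l2c_write_sec_sketch(unsigned long val, void __iomem *base,
                                 unsigned reg)
{
        if (outer_cache.write_sec)
                outer_cache.write_sec(val, reg);        /* e.g. an SMC call */
        else
                writel_relaxed(val, base + reg);
}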
...@@ -365,8 +365,7 @@ void orion5x_restart(enum reboot_mode mode, const char *cmd) ...@@ -365,8 +365,7 @@ void orion5x_restart(enum reboot_mode mode, const char *cmd)
* Many orion-based systems have buggy bootloader implementations. * Many orion-based systems have buggy bootloader implementations.
* This is a common fixup for bogus memory tags. * This is a common fixup for bogus memory tags.
*/ */
void __init tag_fixup_mem32(struct tag *t, char **from, void __init tag_fixup_mem32(struct tag *t, char **from)
struct meminfo *meminfo)
{ {
for (; t->hdr.size; t = tag_next(t)) for (; t->hdr.size; t = tag_next(t))
if (t->hdr.tag == ATAG_MEM && if (t->hdr.tag == ATAG_MEM &&
......
...@@ -64,9 +64,8 @@ int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys); ...@@ -64,9 +64,8 @@ int orion5x_pci_sys_setup(int nr, struct pci_sys_data *sys);
struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys); struct pci_bus *orion5x_pci_sys_scan_bus(int nr, struct pci_sys_data *sys);
int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin); int orion5x_pci_map_irq(const struct pci_dev *dev, u8 slot, u8 pin);
struct meminfo;
struct tag; struct tag;
extern void __init tag_fixup_mem32(struct tag *, char **, struct meminfo *); extern void __init tag_fixup_mem32(struct tag *, char **);
#ifdef CONFIG_MACH_MSS2_DT #ifdef CONFIG_MACH_MSS2_DT
extern void mss2_init(void); extern void mss2_init(void);
......
...@@ -2,7 +2,6 @@ obj-y += rstc.o ...@@ -2,7 +2,6 @@ obj-y += rstc.o
obj-y += common.o obj-y += common.o
obj-y += rtciobrg.o obj-y += rtciobrg.o
obj-$(CONFIG_DEBUG_LL) += lluart.o obj-$(CONFIG_DEBUG_LL) += lluart.o
obj-$(CONFIG_CACHE_L2X0) += l2x0.o
obj-$(CONFIG_SUSPEND) += pm.o sleep.o obj-$(CONFIG_SUSPEND) += pm.o sleep.o
obj-$(CONFIG_SMP) += platsmp.o headsmp.o obj-$(CONFIG_SMP) += platsmp.o headsmp.o
obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o obj-$(CONFIG_HOTPLUG_CPU) += hotplug.o
......
...@@ -34,6 +34,8 @@ static const char *atlas6_dt_match[] __initconst = { ...@@ -34,6 +34,8 @@ static const char *atlas6_dt_match[] __initconst = {
DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)") DT_MACHINE_START(ATLAS6_DT, "Generic ATLAS6 (Flattened Device Tree)")
/* Maintainer: Barry Song <baohua.song@csr.com> */ /* Maintainer: Barry Song <baohua.song@csr.com> */
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.map_io = sirfsoc_map_io, .map_io = sirfsoc_map_io,
.init_late = sirfsoc_init_late, .init_late = sirfsoc_init_late,
.dt_compat = atlas6_dt_match, .dt_compat = atlas6_dt_match,
...@@ -48,6 +50,8 @@ static const char *prima2_dt_match[] __initconst = { ...@@ -48,6 +50,8 @@ static const char *prima2_dt_match[] __initconst = {
DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)") DT_MACHINE_START(PRIMA2_DT, "Generic PRIMA2 (Flattened Device Tree)")
/* Maintainer: Barry Song <baohua.song@csr.com> */ /* Maintainer: Barry Song <baohua.song@csr.com> */
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.map_io = sirfsoc_map_io, .map_io = sirfsoc_map_io,
.dma_zone_size = SZ_256M, .dma_zone_size = SZ_256M,
.init_late = sirfsoc_init_late, .init_late = sirfsoc_init_late,
...@@ -63,6 +67,8 @@ static const char *marco_dt_match[] __initconst = { ...@@ -63,6 +67,8 @@ static const char *marco_dt_match[] __initconst = {
DT_MACHINE_START(MARCO_DT, "Generic MARCO (Flattened Device Tree)") DT_MACHINE_START(MARCO_DT, "Generic MARCO (Flattened Device Tree)")
/* Maintainer: Barry Song <baohua.song@csr.com> */ /* Maintainer: Barry Song <baohua.song@csr.com> */
.l2c_aux_val = 0,
.l2c_aux_mask = ~0,
.smp = smp_ops(sirfsoc_smp_ops), .smp = smp_ops(sirfsoc_smp_ops),
.map_io = sirfsoc_map_io, .map_io = sirfsoc_map_io,
.init_late = sirfsoc_init_late, .init_late = sirfsoc_init_late,
......
/*
* l2 cache initialization for CSR SiRFprimaII
*
* Copyright (c) 2011 Cambridge Silicon Radio Limited, a CSR plc group company.
*
* Licensed under GPLv2 or later.
*/
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <asm/hardware/cache-l2x0.h>
struct l2x0_aux {
u32 val;
u32 mask;
};
static const struct l2x0_aux prima2_l2x0_aux __initconst = {
.val = 2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT,
.mask = 0,
};
static const struct l2x0_aux marco_l2x0_aux __initconst = {
.val = (2 << L2X0_AUX_CTRL_WAY_SIZE_SHIFT) |
(1 << L2X0_AUX_CTRL_ASSOCIATIVITY_SHIFT),
.mask = L2X0_AUX_CTRL_MASK,
};
static const struct of_device_id sirf_l2x0_ids[] __initconst = {
{ .compatible = "sirf,prima2-pl310-cache", .data = &prima2_l2x0_aux, },
{ .compatible = "sirf,marco-pl310-cache", .data = &marco_l2x0_aux, },
{},
};
static int __init sirfsoc_l2x0_init(void)
{
struct device_node *np;
const struct l2x0_aux *aux;
np = of_find_matching_node(NULL, sirf_l2x0_ids);
if (np) {
aux = of_match_node(sirf_l2x0_ids, np)->data;
return l2x0_of_init(aux->val, aux->mask);
}
return 0;
}
early_initcall(sirfsoc_l2x0_init);
...@@ -71,7 +71,6 @@ static int sirfsoc_pm_enter(suspend_state_t state) ...@@ -71,7 +71,6 @@ static int sirfsoc_pm_enter(suspend_state_t state)
case PM_SUSPEND_MEM: case PM_SUSPEND_MEM:
sirfsoc_pre_suspend_power_off(); sirfsoc_pre_suspend_power_off();
outer_flush_all();
outer_disable(); outer_disable();
/* go zzz */ /* go zzz */
cpu_suspend(0, sirfsoc_finish_suspend); cpu_suspend(0, sirfsoc_finish_suspend);
......
...@@ -837,8 +837,7 @@ static void __init cm_x300_init(void) ...@@ -837,8 +837,7 @@ static void __init cm_x300_init(void)
cm_x300_init_bl(); cm_x300_init_bl();
} }
static void __init cm_x300_fixup(struct tag *tags, char **cmdline, static void __init cm_x300_fixup(struct tag *tags, char **cmdline)
struct meminfo *mi)
{ {
/* Make sure that mi->bank[0].start = PHYS_ADDR */ /* Make sure that mi->bank[0].start = PHYS_ADDR */
for (; tags->hdr.size; tags = tag_next(tags)) for (; tags->hdr.size; tags = tag_next(tags))
......
...@@ -34,6 +34,7 @@ ...@@ -34,6 +34,7 @@
#include <linux/input/matrix_keypad.h> #include <linux/input/matrix_keypad.h>
#include <linux/gpio_keys.h> #include <linux/gpio_keys.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/memblock.h>
#include <video/w100fb.h> #include <video/w100fb.h>
#include <asm/setup.h> #include <asm/setup.h>
...@@ -753,16 +754,13 @@ static void __init corgi_init(void) ...@@ -753,16 +754,13 @@ static void __init corgi_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices)); platform_add_devices(devices, ARRAY_SIZE(devices));
} }
static void __init fixup_corgi(struct tag *tags, char **cmdline, static void __init fixup_corgi(struct tag *tags, char **cmdline)
struct meminfo *mi)
{ {
sharpsl_save_param(); sharpsl_save_param();
mi->nr_banks=1;
mi->bank[0].start = 0xa0000000;
if (machine_is_corgi()) if (machine_is_corgi())
mi->bank[0].size = (32*1024*1024); memblock_add(0xa0000000, SZ_32M);
else else
mi->bank[0].size = (64*1024*1024); memblock_add(0xa0000000, SZ_64M);
} }
#ifdef CONFIG_MACH_CORGI #ifdef CONFIG_MACH_CORGI
......
...@@ -21,6 +21,7 @@ ...@@ -21,6 +21,7 @@
#include <linux/mtd/nand.h> #include <linux/mtd/nand.h>
#include <linux/mtd/partitions.h> #include <linux/mtd/partitions.h>
#include <linux/usb/gpio_vbus.h> #include <linux/usb/gpio_vbus.h>
#include <linux/memblock.h>
#include <video/w100fb.h> #include <video/w100fb.h>
...@@ -41,14 +42,12 @@ ...@@ -41,14 +42,12 @@
#include "clock.h" #include "clock.h"
/* Only e800 has 128MB RAM */ /* Only e800 has 128MB RAM */
void __init eseries_fixup(struct tag *tags, char **cmdline, struct meminfo *mi) void __init eseries_fixup(struct tag *tags, char **cmdline)
{ {
mi->nr_banks=1;
mi->bank[0].start = 0xa0000000;
if (machine_is_e800()) if (machine_is_e800())
mi->bank[0].size = (128*1024*1024); memblock_add(0xa0000000, SZ_128M);
else else
mi->bank[0].size = (64*1024*1024); memblock_add(0xa0000000, SZ_64M);
} }
struct gpio_vbus_mach_info e7xx_udc_info = { struct gpio_vbus_mach_info e7xx_udc_info = {
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <linux/spi/ads7846.h> #include <linux/spi/ads7846.h>
#include <linux/spi/pxa2xx_spi.h> #include <linux/spi/pxa2xx_spi.h>
#include <linux/mtd/sharpsl.h> #include <linux/mtd/sharpsl.h>
#include <linux/memblock.h>
#include <mach/hardware.h> #include <mach/hardware.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
...@@ -456,13 +457,10 @@ static void __init poodle_init(void) ...@@ -456,13 +457,10 @@ static void __init poodle_init(void)
poodle_init_spi(); poodle_init_spi();
} }
static void __init fixup_poodle(struct tag *tags, char **cmdline, static void __init fixup_poodle(struct tag *tags, char **cmdline)
struct meminfo *mi)
{ {
sharpsl_save_param(); sharpsl_save_param();
mi->nr_banks=1; memblock_add(0xa0000000, SZ_32M);
mi->bank[0].start = 0xa0000000;
mi->bank[0].size = (32*1024*1024);
} }
MACHINE_START(POODLE, "SHARP Poodle") MACHINE_START(POODLE, "SHARP Poodle")
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/io.h> #include <linux/io.h>
#include <linux/module.h> #include <linux/module.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/memblock.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
...@@ -971,13 +972,10 @@ static void __init spitz_init(void) ...@@ -971,13 +972,10 @@ static void __init spitz_init(void)
spitz_i2c_init(); spitz_i2c_init();
} }
static void __init spitz_fixup(struct tag *tags, char **cmdline, static void __init spitz_fixup(struct tag *tags, char **cmdline)
struct meminfo *mi)
{ {
sharpsl_save_param(); sharpsl_save_param();
mi->nr_banks = 1; memblock_add(0xa0000000, SZ_64M);
mi->bank[0].start = 0xa0000000;
mi->bank[0].size = (64*1024*1024);
} }
#ifdef CONFIG_MACH_SPITZ #ifdef CONFIG_MACH_SPITZ
......
...@@ -37,6 +37,7 @@ ...@@ -37,6 +37,7 @@
#include <linux/i2c/pxa-i2c.h> #include <linux/i2c/pxa-i2c.h>
#include <linux/usb/gpio_vbus.h> #include <linux/usb/gpio_vbus.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/memblock.h>
#include <asm/setup.h> #include <asm/setup.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
...@@ -960,13 +961,10 @@ static void __init tosa_init(void) ...@@ -960,13 +961,10 @@ static void __init tosa_init(void)
platform_add_devices(devices, ARRAY_SIZE(devices)); platform_add_devices(devices, ARRAY_SIZE(devices));
} }
static void __init fixup_tosa(struct tag *tags, char **cmdline, static void __init fixup_tosa(struct tag *tags, char **cmdline)
struct meminfo *mi)
{ {
sharpsl_save_param(); sharpsl_save_param();
mi->nr_banks=1; memblock_add(0xa0000000, SZ_64M);
mi->bank[0].start = 0xa0000000;
mi->bank[0].size = (64*1024*1024);
} }
MACHINE_START(TOSA, "SHARP Tosa") MACHINE_START(TOSA, "SHARP Tosa")
......
...@@ -31,6 +31,7 @@ ...@@ -31,6 +31,7 @@
#include <linux/amba/mmci.h> #include <linux/amba/mmci.h>
#include <linux/gfp.h> #include <linux/gfp.h>
#include <linux/mtd/physmap.h> #include <linux/mtd/physmap.h>
#include <linux/memblock.h>
#include <mach/hardware.h> #include <mach/hardware.h>
#include <asm/irq.h> #include <asm/irq.h>
...@@ -385,19 +386,15 @@ void __init realview_timer_init(unsigned int timer_irq) ...@@ -385,19 +386,15 @@ void __init realview_timer_init(unsigned int timer_irq)
/* /*
* Setup the memory banks. * Setup the memory banks.
*/ */
void realview_fixup(struct tag *tags, char **from, struct meminfo *meminfo) void realview_fixup(struct tag *tags, char **from)
{ {
/* /*
* Most RealView platforms have 512MB contiguous RAM at 0x70000000. * Most RealView platforms have 512MB contiguous RAM at 0x70000000.
* Half of this is mirrored at 0. * Half of this is mirrored at 0.
*/ */
#ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET #ifdef CONFIG_REALVIEW_HIGH_PHYS_OFFSET
meminfo->bank[0].start = 0x70000000; memblock_add(0x70000000, SZ_512M);
meminfo->bank[0].size = SZ_512M;
meminfo->nr_banks = 1;
#else #else
meminfo->bank[0].start = 0; memblock_add(0, SZ_256M);
meminfo->bank[0].size = SZ_256M;
meminfo->nr_banks = 1;
#endif #endif
} }
...@@ -52,8 +52,7 @@ extern int realview_flash_register(struct resource *res, u32 num); ...@@ -52,8 +52,7 @@ extern int realview_flash_register(struct resource *res, u32 num);
extern int realview_eth_register(const char *name, struct resource *res); extern int realview_eth_register(const char *name, struct resource *res);
extern int realview_usb_register(struct resource *res); extern int realview_usb_register(struct resource *res);
extern void realview_init_early(void); extern void realview_init_early(void);
extern void realview_fixup(struct tag *tags, char **from, extern void realview_fixup(struct tag *tags, char **from);
struct meminfo *meminfo);
extern struct smp_operations realview_smp_ops; extern struct smp_operations realview_smp_ops;
extern void realview_cpu_die(unsigned int cpu); extern void realview_cpu_die(unsigned int cpu);
......
...@@ -442,8 +442,13 @@ static void __init realview_eb_init(void) ...@@ -442,8 +442,13 @@ static void __init realview_eb_init(void)
realview_eb11mp_fixup(); realview_eb11mp_fixup();
#ifdef CONFIG_CACHE_L2X0 #ifdef CONFIG_CACHE_L2X0
/* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled /*
* Bits: .... ...0 0111 1001 0000 .... .... .... */ * The PL220 needs to be manually configured as the hardware
* doesn't report the correct sizes.
* 1MB (128KB/way), 8-way associativity, event monitor and
* parity enabled, ignore share bit, no force write allocate
* Bits: .... ...0 0111 1001 0000 .... .... ....
*/
l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff); l2x0_init(__io_address(REALVIEW_EB11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif #endif
platform_device_register(&pmu_device); platform_device_register(&pmu_device);
......
...@@ -32,6 +32,7 @@ ...@@ -32,6 +32,7 @@
#include <linux/irqchip/arm-gic.h> #include <linux/irqchip/arm-gic.h>
#include <linux/platform_data/clk-realview.h> #include <linux/platform_data/clk-realview.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/memblock.h>
#include <mach/hardware.h> #include <mach/hardware.h>
#include <asm/irq.h> #include <asm/irq.h>
...@@ -339,15 +340,12 @@ static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd) ...@@ -339,15 +340,12 @@ static void realview_pb1176_restart(enum reboot_mode mode, const char *cmd)
dsb(); dsb();
} }
static void realview_pb1176_fixup(struct tag *tags, char **from, static void realview_pb1176_fixup(struct tag *tags, char **from)
struct meminfo *meminfo)
{ {
/* /*
* RealView PB1176 only has 128MB of RAM mapped at 0. * RealView PB1176 only has 128MB of RAM mapped at 0.
*/ */
meminfo->bank[0].start = 0; memblock_add(0, SZ_128M);
meminfo->bank[0].size = SZ_128M;
meminfo->nr_banks = 1;
} }
static void __init realview_pb1176_init(void) static void __init realview_pb1176_init(void)
...@@ -355,7 +353,13 @@ static void __init realview_pb1176_init(void) ...@@ -355,7 +353,13 @@ static void __init realview_pb1176_init(void)
int i; int i;
#ifdef CONFIG_CACHE_L2X0 #ifdef CONFIG_CACHE_L2X0
/* 128Kb (16Kb/way) 8-way associativity. evmon/parity/share enabled. */ /*
* The PL220 needs to be manually configured as the hardware
* doesn't report the correct sizes.
* 128kB (16kB/way), 8-way associativity, event monitor and
* parity enabled, ignore share bit, no force write allocate
* Bits: .... ...0 0111 0011 0000 .... .... ....
*/
l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff); l2x0_init(__io_address(REALVIEW_PB1176_L220_BASE), 0x00730000, 0xfe000fff);
#endif #endif
......
...@@ -337,8 +337,13 @@ static void __init realview_pb11mp_init(void) ...@@ -337,8 +337,13 @@ static void __init realview_pb11mp_init(void)
int i; int i;
#ifdef CONFIG_CACHE_L2X0 #ifdef CONFIG_CACHE_L2X0
/* 1MB (128KB/way), 8-way associativity, evmon/parity/share enabled /*
* Bits: .... ...0 0111 1001 0000 .... .... .... */ * The PL220 needs to be manually configured as the hardware
* doesn't report the correct sizes.
* 1MB (128KB/way), 8-way associativity, event monitor and
* parity enabled, ignore share bit, no force write allocate
* Bits: .... ...0 0111 1001 0000 .... .... ....
*/
l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff); l2x0_init(__io_address(REALVIEW_TC11MP_L220_BASE), 0x00790000, 0xfe000fff);
#endif #endif
......
...@@ -29,6 +29,7 @@ ...@@ -29,6 +29,7 @@
#include <linux/irqchip/arm-gic.h> #include <linux/irqchip/arm-gic.h>
#include <linux/platform_data/clk-realview.h> #include <linux/platform_data/clk-realview.h>
#include <linux/reboot.h> #include <linux/reboot.h>
#include <linux/memblock.h>
#include <asm/irq.h> #include <asm/irq.h>
#include <asm/mach-types.h> #include <asm/mach-types.h>
...@@ -325,23 +326,19 @@ static void __init realview_pbx_timer_init(void) ...@@ -325,23 +326,19 @@ static void __init realview_pbx_timer_init(void)
realview_pbx_twd_init(); realview_pbx_twd_init();
} }
static void realview_pbx_fixup(struct tag *tags, char **from, static void realview_pbx_fixup(struct tag *tags, char **from)
struct meminfo *meminfo)
{ {
#ifdef CONFIG_SPARSEMEM #ifdef CONFIG_SPARSEMEM
/* /*
* Memory configuration with SPARSEMEM enabled on RealView PBX (see * Memory configuration with SPARSEMEM enabled on RealView PBX (see
* asm/mach/memory.h for more information). * asm/mach/memory.h for more information).
*/ */
meminfo->bank[0].start = 0;
meminfo->bank[0].size = SZ_256M; memblock_add(0, SZ_256M);
meminfo->bank[1].start = 0x20000000; memblock_add(0x20000000, SZ_512M);
meminfo->bank[1].size = SZ_512M; memblock_add(0x80000000, SZ_256M);
meminfo->bank[2].start = 0x80000000;
meminfo->bank[2].size = SZ_256M;
meminfo->nr_banks = 3;
#else #else
realview_fixup(tags, from, meminfo); realview_fixup(tags, from);
#endif #endif
} }
...@@ -370,8 +367,8 @@ static void __init realview_pbx_init(void) ...@@ -370,8 +367,8 @@ static void __init realview_pbx_init(void)
__io_address(REALVIEW_PBX_TILE_L220_BASE); __io_address(REALVIEW_PBX_TILE_L220_BASE);
/* set RAM latencies to 1 cycle for eASIC */ /* set RAM latencies to 1 cycle for eASIC */
writel(0, l2x0_base + L2X0_TAG_LATENCY_CTRL); writel(0, l2x0_base + L310_TAG_LATENCY_CTRL);
writel(0, l2x0_base + L2X0_DATA_LATENCY_CTRL); writel(0, l2x0_base + L310_DATA_LATENCY_CTRL);
/* 16KB way size, 8-way associativity, parity disabled /* 16KB way size, 8-way associativity, parity disabled
* Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */ * Bits: .. 0 0 0 0 1 00 1 0 1 001 0 000 0 .... .... .... */
......
@@ -24,12 +24,6 @@
 #include <asm/hardware/cache-l2x0.h>
 #include "core.h"

-static void __init rockchip_dt_init(void)
-{
-        l2x0_of_init(0, ~0UL);
-        of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
-}
-
 static const char * const rockchip_board_dt_compat[] = {
         "rockchip,rk2928",
         "rockchip,rk3066a",
@@ -39,6 +33,7 @@ static const char * const rockchip_board_dt_compat[] = {
 };

 DT_MACHINE_START(ROCKCHIP_DT, "Rockchip Cortex-A9 (Device Tree)")
-        .init_machine = rockchip_dt_init,
+        .l2c_aux_val = 0,
+        .l2c_aux_mask = ~0,
         .dt_compat = rockchip_board_dt_compat,
 MACHINE_END
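This is the new generic pattern for DT platforms: instead of calling l2x0_of_init() from .init_machine, the machine record declares the auxiliary-control value/mask and the core code brings the L2 cache up itself; with no .init_machine at all, the kernel falls back to the default of_platform_populate() call. A sketch of the same pattern with hypothetical board names:

#include <asm/mach/arch.h>      /* DT_MACHINE_START, MACHINE_END */

/* Hypothetical compatible strings, for illustration only. */
static const char *const myboard_dt_compat[] = {
        "vendor,myboard",
        NULL,
};

DT_MACHINE_START(MYBOARD_DT, "Hypothetical Cortex-A9 board (Device Tree)")
        /* force no bits on, mask no bits off: accept the probed aux value */
        .l2c_aux_val    = 0,
        .l2c_aux_mask   = ~0,
        .dt_compat      = myboard_dt_compat,
MACHINE_END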
@@ -22,6 +22,7 @@
 #include <linux/serial_s3c.h>
 #include <linux/platform_device.h>
 #include <linux/io.h>
+#include <linux/memblock.h>

 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -93,13 +94,10 @@ static struct platform_device *smdk2413_devices[] __initdata = {
         &s3c2412_device_dma,
 };

-static void __init smdk2413_fixup(struct tag *tags, char **cmdline,
-                                  struct meminfo *mi)
+static void __init smdk2413_fixup(struct tag *tags, char **cmdline)
 {
         if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
-                mi->nr_banks=1;
-                mi->bank[0].start = 0x30000000;
-                mi->bank[0].size = SZ_64M;
+                memblock_add(0x30000000, SZ_64M);
         }
 }
...
@@ -23,6 +23,7 @@
 #include <linux/mtd/nand.h>
 #include <linux/mtd/nand_ecc.h>
 #include <linux/mtd/partitions.h>
+#include <linux/memblock.h>

 #include <asm/mach/arch.h>
 #include <asm/mach/map.h>
@@ -129,13 +130,10 @@ static struct platform_device *vstms_devices[] __initdata = {
         &s3c2412_device_dma,
 };

-static void __init vstms_fixup(struct tag *tags, char **cmdline,
-                               struct meminfo *mi)
+static void __init vstms_fixup(struct tag *tags, char **cmdline)
 {
         if (tags != phys_to_virt(S3C2410_SDRAM_PA + 0x100)) {
-                mi->nr_banks=1;
-                mi->bank[0].start = 0x30000000;
-                mi->bank[0].size = SZ_64M;
+                memblock_add(0x30000000, SZ_64M);
         }
 }
...
@@ -531,7 +531,7 @@ static void __init get_assabet_scr(void)
 }

 static void __init
-fixup_assabet(struct tag *tags, char **cmdline, struct meminfo *mi)
+fixup_assabet(struct tag *tags, char **cmdline)
 {
         /* This must be done before any call to machine_has_neponset() */
         map_sa1100_gpio_regs();
...
@@ -164,8 +164,8 @@ static void __init eva_init(void)
         r8a7740_meram_workaround();

 #ifdef CONFIG_CACHE_L2X0
-        /* Early BRESP enable, Shared attribute override enable, 32K*8way */
-        l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
+        /* Shared attribute override enable, 32K*8way */
+        l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
 #endif

         r8a7740_add_standard_devices_dt();
...
@@ -1271,8 +1271,8 @@ static void __init eva_init(void)
 #ifdef CONFIG_CACHE_L2X0
-        /* Early BRESP enable, Shared attribute override enable, 32K*8way */
-        l2x0_init(IOMEM(0xf0002000), 0x40440000, 0x82000fff);
+        /* Shared attribute override enable, 32K*8way */
+        l2x0_init(IOMEM(0xf0002000), 0x00400000, 0xc20f0fff);
 #endif

         i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
...
@@ -36,8 +36,8 @@ static void __init kzm_init(void)
         sh73a0_add_standard_devices_dt();

 #ifdef CONFIG_CACHE_L2X0
-        /* Early BRESP enable, Shared attribute override enable, 64K*8way */
-        l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
+        /* Shared attribute override enable, 64K*8way */
+        l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
 #endif
 }
...
@@ -876,8 +876,8 @@ static void __init kzm_init(void)
         gpio_request_one(223, GPIOF_IN, NULL); /* IRQ8 */

 #ifdef CONFIG_CACHE_L2X0
-        /* Early BRESP enable, Shared attribute override enable, 64K*8way */
-        l2x0_init(IOMEM(0xf0100000), 0x40460000, 0x82000fff);
+        /* Shared attribute override enable, 64K*8way */
+        l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
 #endif

         i2c_register_board_info(0, i2c0_devices, ARRAY_SIZE(i2c0_devices));
...
@@ -285,10 +285,10 @@ void __init r8a7778_add_dt_devices(void)
         void __iomem *base = ioremap_nocache(0xf0100000, 0x1000);
         if (base) {
                 /*
-                 * Early BRESP enable, Shared attribute override enable, 64K*16way
+                 * Shared attribute override enable, 64K*16way
                  * don't call iounmap(base)
                  */
-                l2x0_init(base, 0x40470000, 0x82000fff);
+                l2x0_init(base, 0x00400000, 0xc20f0fff);
         }
 #endif
...
@@ -660,8 +660,8 @@ static struct platform_device *r8a7779_standard_devices[] __initdata = {
 void __init r8a7779_add_standard_devices(void)
 {
 #ifdef CONFIG_CACHE_L2X0
-        /* Early BRESP enable, Shared attribute override enable, 64K*16way */
-        l2x0_init(IOMEM(0xf0100000), 0x40470000, 0x82000fff);
+        /* Shared attribute override enable, 64K*16way */
+        l2x0_init(IOMEM(0xf0100000), 0x00400000, 0xc20f0fff);
 #endif

         r8a7779_pm_init();
...
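All of the shmobile hunks above follow the same shape: the platform aux value keeps only the shared-attribute-override bit, the mask is widened so more of what the hardware reports survives, and "Early BRESP" disappears from the comments because the reworked L2C-310 code now manages that bit itself on Cortex-A9. In essence, l2x0_init() combines its two arguments with the hardware value along these lines; this is a simplified sketch of the semantics, not the literal kernel implementation, and the helper name is made up.

#include <linux/types.h>

/* Hypothetical helper showing the value/mask semantics of l2x0_init(). */
static u32 l2c_combine_aux(u32 hw_aux, u32 aux_val, u32 aux_mask)
{
        hw_aux &= aux_mask;      /* drop the bits the platform wants cleared */
        return hw_aux | aux_val; /* then force on the bits it requires */
}

/*
 * For the boards above: aux_val 0x00400000 forces only the shared
 * attribute override bit (bit 22 in the L2C-310 layout), while the
 * wider 0xc20f0fff mask leaves the probed cache geometry and the
 * Early BRESP decision to the core L2C code rather than hard-coding
 * them per board.
 */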