Commit 32f9aab8 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.linaro.org/people/rmk/linux-arm

Pull ARM updates (part one) from Russell King:

 - MMC patches from Ulf Hansson and Pawel Moll.  These add support for
   DDR mode and the latest variant found on ARM Versatile Express, as
   well as a number of cleanups.

 - A fix to improve the behaviour of ARM's sched_clock()

 - Changes to the ARM ioremap() code.  I'm not convinced by the
   primary arguments for this, but it's been around for a while, and
   people seem happy with it - and the "other" justification for this is
   at

      http://lkml.org/lkml/2012/12/6/184

 - Add SCHED_HRTICK to ARM's Kconfig

 - Making the ARM SHA/AES code Thumb-2 compatible

 - A collection of other small updates.

* 'for-linus' of git://git.linaro.org/people/rmk/linux-arm: (26 commits)
  ARM: add SCHED_HRTICK config option
  ARM: 7650/1: mm: replace direct access to mm->context.id with new macro
  ARM: 7649/1: mm: mm->context.id fix for big-endian
  ARM: 7648/1: pci: Allow passing per-controller private data
  ARM: 7647/1: pci: Keep pci_common_init() around after init
  ARM: fix warnings introduced by previous patch
  ARM: 7646/1: mm: use static_vm for managing static mapped areas
  ARM: 7645/1: ioremap: introduce an infrastructure for static mapped area
  ARM: 7644/1: vmregion: remove vmregion code entirely
  MAINTAINERS: Re-assert MMCI driver maintainer status
  MAINTAINERS: add additional file for MMCI driver
  MAINTAINERS: add maintainer entry for AMBA serial drivers
  ARM: 7637/1: memory: use SZ_ constants for defining the virtual memory layout
  ARM: 7643/1: sched: correct update_sched_clock()
  ARM: 7635/1: versatile: fix the PCI IRQ regression
  ARM: 7639/1: cache-l2x0: add missed dummy outer_resume entry
  ARM: 7630/1: mmc: mmci: Fixup and cleanup code for DMA handling
  ARM: 7632/1: spinlock: avoid exclusive accesses on unlock() path
  ARM: 7631/1: mmc: mmci: Add new VE MMCI variant
  ARM: 7623/1: mmc: mmci: Fixup clock gating when freq is 0 for ST-variants
  ...
parents e177bb58 1b1c7409
@@ -670,8 +670,16 @@ F: drivers/input/serio/ambakmi.*
 F:	include/linux/amba/kmi.h
 
 ARM PRIMECELL MMCI PL180/1 DRIVER
-S:	Orphan
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
 F:	drivers/mmc/host/mmci.*
+F:	include/linux/amba/mmci.h
+
+ARM PRIMECELL UART PL010 AND PL011 DRIVERS
+M:	Russell King <linux@arm.linux.org.uk>
+S:	Maintained
+F:	drivers/tty/serial/amba-pl01*.c
+F:	include/linux/amba/serial.h
 
 ARM PRIMECELL BUS SUPPORT
 M:	Russell King <linux@arm.linux.org.uk>
......
@@ -1654,6 +1654,9 @@ config HZ
 	default SHMOBILE_TIMER_HZ if ARCH_SHMOBILE
 	default 100
 
+config SCHED_HRTICK
+	def_bool HIGH_RES_TIMERS
+
 config THUMB2_KERNEL
 	bool "Compile the kernel in Thumb-2 mode"
 	depends on CPU_V7 && !CPU_V6 && !CPU_V6K
......
@@ -34,8 +34,9 @@
 @ A little glue here to select the correct code below for the ARM CPU
 @ that is being targetted.
 
+#include <linux/linkage.h>
+
 .text
-.code	32
 
 .type	AES_Te,%object
 .align	5
@@ -145,10 +146,8 @@ AES_Te:
 @ void AES_encrypt(const unsigned char *in, unsigned char *out,
 @ 		const AES_KEY *key) {
-.global	AES_encrypt
-.type	AES_encrypt,%function
 .align	5
-AES_encrypt:
+ENTRY(AES_encrypt)
 	sub r3,pc,#8 @ AES_encrypt
 	stmdb sp!,{r1,r4-r12,lr}
 	mov r12,r0 @ inp
@@ -239,15 +238,8 @@ AES_encrypt:
 	strb r6,[r12,#14]
 	strb r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia sp!,{r4-r12,pc}
-#else
-	ldmia sp!,{r4-r12,lr}
-	tst lr,#1
-	moveq pc,lr			@ be binary compatible with V4, yet
-	.word 0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_encrypt,.-AES_encrypt
+ENDPROC(AES_encrypt)
 
 .type	_armv4_AES_encrypt,%function
 .align	2
@@ -386,10 +378,8 @@ _armv4_AES_encrypt:
 	ldr pc,[sp],#4 @ pop and return
 .size	_armv4_AES_encrypt,.-_armv4_AES_encrypt
 
-.global	private_AES_set_encrypt_key
-.type	private_AES_set_encrypt_key,%function
 .align	5
-private_AES_set_encrypt_key:
+ENTRY(private_AES_set_encrypt_key)
 _armv4_AES_set_encrypt_key:
 	sub r3,pc,#8 @ AES_set_encrypt_key
 	teq r0,#0
@@ -658,15 +648,11 @@ _armv4_AES_set_encrypt_key:
 .Ldone:	mov r0,#0
 	ldmia sp!,{r4-r12,lr}
-.Labrt:	tst lr,#1
-	moveq pc,lr			@ be binary compatible with V4, yet
-	.word 0xe12fff1e			@ interoperable with Thumb ISA:-)
-.size	private_AES_set_encrypt_key,.-private_AES_set_encrypt_key
+.Labrt:	mov pc,lr
+ENDPROC(private_AES_set_encrypt_key)
 
-.global	private_AES_set_decrypt_key
-.type	private_AES_set_decrypt_key,%function
 .align	5
-private_AES_set_decrypt_key:
+ENTRY(private_AES_set_decrypt_key)
 	str lr,[sp,#-4]! @ push lr
 #if 0
 	@ kernel does both of these in setkey so optimise this bit out by
@@ -748,15 +734,8 @@ private_AES_set_decrypt_key:
 	bne .Lmix
 
 	mov r0,#0
-#if __ARM_ARCH__>=5
 	ldmia sp!,{r4-r12,pc}
-#else
-	ldmia sp!,{r4-r12,lr}
-	tst lr,#1
-	moveq pc,lr			@ be binary compatible with V4, yet
-	.word 0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	private_AES_set_decrypt_key,.-private_AES_set_decrypt_key
+ENDPROC(private_AES_set_decrypt_key)
 
 .type	AES_Td,%object
 .align	5
@@ -862,10 +841,8 @@ AES_Td:
 @ void AES_decrypt(const unsigned char *in, unsigned char *out,
 @ 		const AES_KEY *key) {
-.global	AES_decrypt
-.type	AES_decrypt,%function
 .align	5
-AES_decrypt:
+ENTRY(AES_decrypt)
 	sub r3,pc,#8 @ AES_decrypt
 	stmdb sp!,{r1,r4-r12,lr}
 	mov r12,r0 @ inp
@@ -956,15 +933,8 @@ AES_decrypt:
 	strb r6,[r12,#14]
 	strb r3,[r12,#15]
 #endif
-#if __ARM_ARCH__>=5
 	ldmia sp!,{r4-r12,pc}
-#else
-	ldmia sp!,{r4-r12,lr}
-	tst lr,#1
-	moveq pc,lr			@ be binary compatible with V4, yet
-	.word 0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
-.size	AES_decrypt,.-AES_decrypt
+ENDPROC(AES_decrypt)
 
 .type	_armv4_AES_decrypt,%function
 .align	2
@@ -1064,7 +1034,9 @@ _armv4_AES_decrypt:
 	and r9,lr,r1,lsr#8
 	ldrb r7,[r10,r7]		@ Td4[s1>>0]
-	ldrb r1,[r10,r1,lsr#24]		@ Td4[s1>>24]
+ ARM(	ldrb r1,[r10,r1,lsr#24] )	@ Td4[s1>>24]
+ THUMB(	add r1,r10,r1,lsr#24 )		@ Td4[s1>>24]
+ THUMB(	ldrb r1,[r1] )
 	ldrb r8,[r10,r8]		@ Td4[s1>>16]
 	eor r0,r7,r0,lsl#24
 	ldrb r9,[r10,r9]		@ Td4[s1>>8]
@@ -1077,7 +1049,9 @@ _armv4_AES_decrypt:
 	ldrb r8,[r10,r8]		@ Td4[s2>>0]
 	and r9,lr,r2,lsr#16
-	ldrb r2,[r10,r2,lsr#24]		@ Td4[s2>>24]
+ ARM(	ldrb r2,[r10,r2,lsr#24] )	@ Td4[s2>>24]
+ THUMB(	add r2,r10,r2,lsr#24 )		@ Td4[s2>>24]
+ THUMB(	ldrb r2,[r2] )
 	eor r0,r0,r7,lsl#8
 	ldrb r9,[r10,r9]		@ Td4[s2>>16]
 	eor r1,r8,r1,lsl#16
@@ -1090,7 +1064,9 @@ _armv4_AES_decrypt:
 	and r9,lr,r3			@ i2
 	ldrb r9,[r10,r9]		@ Td4[s3>>0]
-	ldrb r3,[r10,r3,lsr#24]		@ Td4[s3>>24]
+ ARM(	ldrb r3,[r10,r3,lsr#24] )	@ Td4[s3>>24]
+ THUMB(	add r3,r10,r3,lsr#24 )		@ Td4[s3>>24]
+ THUMB(	ldrb r3,[r3] )
 	eor r0,r0,r7,lsl#16
 	ldr r7,[r11,#0]
 	eor r1,r1,r8,lsl#8
......
@@ -51,13 +51,12 @@
 @ Profiler-assisted and platform-specific optimization resulted in 10%
 @ improvement on Cortex A8 core and 12.2 cycles per byte.
 
-.text
+#include <linux/linkage.h>
 
-.global	sha1_block_data_order
-.type	sha1_block_data_order,%function
+.text
 
 .align	2
-sha1_block_data_order:
+ENTRY(sha1_block_data_order)
 	stmdb sp!,{r4-r12,lr}
 	add r2,r1,r2,lsl#6 @ r2 to point at the end of r1
 	ldmia r0,{r3,r4,r5,r6,r7}
@@ -194,7 +193,7 @@ sha1_block_data_order:
 	eor r10,r10,r7,ror#2		@ F_00_19(B,C,D)
 	str r9,[r14,#-4]!
 	add r3,r3,r10			@ E+=F_00_19(B,C,D)
-	teq r14,sp
+	cmp r14,sp
 	bne .L_00_15			@ [((11+4)*5+2)*3]
 #if __ARM_ARCH__<7
 	ldrb r10,[r1,#2]
@@ -374,7 +373,9 @@ sha1_block_data_order:
 	@ F_xx_xx
 	add r3,r3,r9			@ E+=X[i]
 	add r3,r3,r10			@ E+=F_20_39(B,C,D)
-	teq r14,sp			@ preserve carry
+ ARM(	teq r14,sp )			@ preserve carry
+ THUMB(	mov r11,sp )
+ THUMB(	teq r14,r11 )			@ preserve carry
 	bne .L_20_39_or_60_79		@ [+((12+3)*5+2)*4]
 	bcs .L_done			@ [+((12+3)*5+2)*4], spare 300 bytes
@@ -466,7 +467,7 @@ sha1_block_data_order:
 	add r3,r3,r9			@ E+=X[i]
 	add r3,r3,r10			@ E+=F_40_59(B,C,D)
 	add r3,r3,r11,ror#2
-	teq r14,sp
+	cmp r14,sp
 	bne .L_40_59			@ [+((12+5)*5+2)*4]
 	ldr r8,.LK_60_79
@@ -485,19 +486,12 @@ sha1_block_data_order:
 	teq r1,r2
 	bne .Lloop			@ [+18], total 1307
 
-#if __ARM_ARCH__>=5
 	ldmia sp!,{r4-r12,pc}
-#else
-	ldmia sp!,{r4-r12,lr}
-	tst lr,#1
-	moveq pc,lr			@ be binary compatible with V4, yet
-	.word 0xe12fff1e			@ interoperable with Thumb ISA:-)
-#endif
 
 .align	2
 .LK_00_19:	.word	0x5a827999
 .LK_20_39:	.word	0x6ed9eba1
 .LK_40_59:	.word	0x8f1bbcdc
 .LK_60_79:	.word	0xca62c1d6
-.size	sha1_block_data_order,.-sha1_block_data_order
+ENDPROC(sha1_block_data_order)
 .asciz	"SHA1 block transform for ARMv4, CRYPTOGAMS by <appro@openssl.org>"
 .align	2
@@ -23,6 +23,7 @@ struct hw_pci {
 #endif
 	struct pci_ops	*ops;
 	int		nr_controllers;
+	void		**private_data;
 	int		(*setup)(int nr, struct pci_sys_data *);
 	struct pci_bus *(*scan)(int nr, struct pci_sys_data *);
 	void		(*preinit)(void);
......
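The new hw_pci::private_data member lets a platform hand the PCI core one opaque pointer per controller; pcibios_init_hw() (changed further down in this diff) copies private_data[nr] into the matching pci_sys_data before calling the setup hook. A hypothetical board file could wire it up as sketched below — every board_* name is a placeholder for illustration, not code from this series:

/* Sketch only: per-controller private data via the new hw_pci hook. */
struct board_pci_port { void __iomem *base; };

static struct board_pci_port board_port[2];
static void *board_pci_priv[2] = { &board_port[0], &board_port[1] };

static int board_pci_setup(int nr, struct pci_sys_data *sys)
{
	/* sys->private_data was filled in from board_pci_priv[nr] */
	struct board_pci_port *port = sys->private_data;

	return port->base ? 1 : 0;	/* >0 tells the core the controller exists */
}

static struct hw_pci board_pci __initdata = {
	.nr_controllers	= 2,
	.private_data	= board_pci_priv,
	.setup		= board_pci_setup,
};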
@@ -36,23 +36,23 @@
  * TASK_UNMAPPED_BASE - the lower boundary of the mmap VM area
  */
 #define PAGE_OFFSET		UL(CONFIG_PAGE_OFFSET)
-#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(0x01000000))
+#define TASK_SIZE		(UL(CONFIG_PAGE_OFFSET) - UL(SZ_16M))
 #define TASK_UNMAPPED_BASE	ALIGN(TASK_SIZE / 3, SZ_16M)
 
 /*
  * The maximum size of a 26-bit user space task.
  */
-#define TASK_SIZE_26		UL(0x04000000)
+#define TASK_SIZE_26		(UL(1) << 26)
 
 /*
  * The module space lives between the addresses given by TASK_SIZE
  * and PAGE_OFFSET - it must be within 32MB of the kernel text.
  */
 #ifndef CONFIG_THUMB2_KERNEL
-#define MODULES_VADDR		(PAGE_OFFSET - 16*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_16M)
 #else
 /* smaller range for Thumb-2 symbols relocation (2^24)*/
-#define MODULES_VADDR		(PAGE_OFFSET - 8*1024*1024)
+#define MODULES_VADDR		(PAGE_OFFSET - SZ_8M)
 #endif
 
 #if TASK_SIZE > MODULES_VADDR
......
@@ -92,6 +92,7 @@ static inline void outer_flush_range(phys_addr_t start, phys_addr_t end)
 static inline void outer_flush_all(void) { }
 static inline void outer_inv_all(void) { }
 static inline void outer_disable(void) { }
+static inline void outer_resume(void) { }
 
 #endif
......
@@ -119,22 +119,8 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)
 static inline void arch_spin_unlock(arch_spinlock_t *lock)
 {
-	unsigned long tmp;
-	u32 slock;
-
 	smp_mb();
-
-	__asm__ __volatile__(
-"	mov	%1, #1\n"
-"1:	ldrex	%0, [%2]\n"
-"	uadd16	%0, %0, %1\n"
-"	strex	%1, %0, [%2]\n"
-"	teq	%1, #0\n"
-"	bne	1b"
-	: "=&r" (slock), "=&r" (tmp)
-	: "r" (&lock->slock)
-	: "cc");
-
+	lock->tickets.owner++;
 	dsb_sev();
 }
......
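With ticket locking, the unlock path no longer needs an exclusive ldrex/strex sequence: only the CPU that currently holds the lock ever writes the owner half-word, so a plain increment followed by dsb_sev() to wake waiters is enough. A sketch of the lock-word layout this relies on, shown for a little-endian kernel — an assumption spelled out here, not quoted from the patch:

/* Sketch only: two 16-bit tickets packed into the 32-bit lock word. */
typedef struct {
	union {
		u32 slock;
		struct {
			u16 owner;	/* ticket currently being served */
			u16 next;	/* next ticket to be handed out  */
		} tickets;
	};
} sketch_arch_spinlock_t;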
@@ -413,7 +413,7 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	return irq;
 }
 
-static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
+static int pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 {
 	int ret;
 	struct pci_host_bridge_window *window;
@@ -445,7 +445,7 @@ static int __init pcibios_init_resources(int busnr, struct pci_sys_data *sys)
 	return 0;
 }
 
-static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
+static void pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 {
 	struct pci_sys_data *sys = NULL;
 	int ret;
@@ -464,6 +464,9 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 		sys->map_irq = hw->map_irq;
 		INIT_LIST_HEAD(&sys->resources);
 
+		if (hw->private_data)
+			sys->private_data = hw->private_data[nr];
+
 		ret = hw->setup(nr, sys);
 
 		if (ret > 0) {
@@ -493,7 +496,7 @@ static void __init pcibios_init_hw(struct hw_pci *hw, struct list_head *head)
 	}
 }
 
-void __init pci_common_init(struct hw_pci *hw)
+void pci_common_init(struct hw_pci *hw)
 {
 	struct pci_sys_data *sys;
 	LIST_HEAD(head);
......
@@ -93,11 +93,11 @@ static void notrace update_sched_clock(void)
 	 * detectable in cyc_to_fixed_sched_clock().
 	 */
 	raw_local_irq_save(flags);
-	cd.epoch_cyc = cyc;
+	cd.epoch_cyc_copy = cyc;
 	smp_wmb();
 	cd.epoch_ns = ns;
 	smp_wmb();
-	cd.epoch_cyc_copy = cyc;
+	cd.epoch_cyc = cyc;
 	raw_local_irq_restore(flags);
 }
......
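The reordering above makes the writer touch the two epoch_cyc fields in the opposite order to the reader, which is what makes a torn update detectable. A sketch of the read side this pairs with (reconstructed for illustration, not part of the hunk; cd and cyc_to_ns() are the names that file uses):

static unsigned long long notrace cyc_to_sched_clock(u32 cyc, u32 mask)
{
	u64 epoch_ns;
	u32 epoch_cyc;

	do {
		epoch_cyc = cd.epoch_cyc;	/* written LAST by the updater  */
		smp_rmb();
		epoch_ns = cd.epoch_ns;
		smp_rmb();
	} while (epoch_cyc != cd.epoch_cyc_copy);	/* written FIRST */

	/* epochs are consistent, so the delta since epoch_cyc is valid */
	return epoch_ns + cyc_to_ns((cyc - epoch_cyc) & mask, cd.mult, cd.shift);
}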
@@ -125,18 +125,6 @@ void __init smp_init_cpus(void)
 		smp_ops.smp_init_cpus();
 }
 
-static void __init platform_smp_prepare_cpus(unsigned int max_cpus)
-{
-	if (smp_ops.smp_prepare_cpus)
-		smp_ops.smp_prepare_cpus(max_cpus);
-}
-
-static void __cpuinit platform_secondary_init(unsigned int cpu)
-{
-	if (smp_ops.smp_secondary_init)
-		smp_ops.smp_secondary_init(cpu);
-}
-
 int __cpuinit boot_secondary(unsigned int cpu, struct task_struct *idle)
 {
 	if (smp_ops.smp_boot_secondary)
@@ -154,12 +142,6 @@ static int platform_cpu_kill(unsigned int cpu)
 	return 1;
 }
 
-static void platform_cpu_die(unsigned int cpu)
-{
-	if (smp_ops.cpu_die)
-		smp_ops.cpu_die(cpu);
-}
-
 static int platform_cpu_disable(unsigned int cpu)
 {
 	if (smp_ops.cpu_disable)
@@ -257,7 +239,8 @@ void __ref cpu_die(void)
 	 * actual CPU shutdown procedure is at least platform (if not
 	 * CPU) specific.
 	 */
-	platform_cpu_die(cpu);
+	if (smp_ops.cpu_die)
+		smp_ops.cpu_die(cpu);
 
 	/*
 	 * Do not return to the idle loop - jump back to the secondary
@@ -324,7 +307,8 @@ asmlinkage void __cpuinit secondary_start_kernel(void)
 	/*
 	 * Give the platform a chance to do its own initialisation.
 	 */
-	platform_secondary_init(cpu);
+	if (smp_ops.smp_secondary_init)
+		smp_ops.smp_secondary_init(cpu);
 
 	notify_cpu_starting(cpu);
@@ -399,8 +383,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 	/*
 	 * Initialise the present map, which describes the set of CPUs
 	 * actually populated at the present time. A platform should
-	 * re-initialize the map in platform_smp_prepare_cpus() if
-	 * present != possible (e.g. physical hotplug).
+	 * re-initialize the map in the platforms smp_prepare_cpus()
+	 * if present != possible (e.g. physical hotplug).
 	 */
 	init_cpu_present(cpu_possible_mask);
@@ -408,7 +392,8 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
 		 * Initialise the SCU if there are more than one CPU
 		 * and let them know where to start.
 		 */
-		platform_smp_prepare_cpus(max_cpus);
+		if (smp_ops.smp_prepare_cpus)
+			smp_ops.smp_prepare_cpus(max_cpus);
 	}
 }
......
@@ -36,6 +36,7 @@
 #include <linux/gfp.h>
 #include <linux/clkdev.h>
 #include <linux/mtd/physmap.h>
+#include <linux/bitops.h>
 
 #include <asm/irq.h>
 #include <asm/hardware/arm_timer.h>
@@ -65,16 +66,28 @@
 #define VA_VIC_BASE		__io_address(VERSATILE_VIC_BASE)
 #define VA_SIC_BASE		__io_address(VERSATILE_SIC_BASE)
 
+/* These PIC IRQs are valid in each configuration */
+#define PIC_VALID_ALL	BIT(SIC_INT_KMI0) | BIT(SIC_INT_KMI1) | \
+			BIT(SIC_INT_SCI3) | BIT(SIC_INT_UART3) | \
+			BIT(SIC_INT_CLCD) | BIT(SIC_INT_TOUCH) | \
+			BIT(SIC_INT_KEYPAD) | BIT(SIC_INT_DoC) | \
+			BIT(SIC_INT_USB) | BIT(SIC_INT_PCI0) | \
+			BIT(SIC_INT_PCI1) | BIT(SIC_INT_PCI2) | \
+			BIT(SIC_INT_PCI3)
+
 #if 1
 #define IRQ_MMCI0A	IRQ_VICSOURCE22
 #define IRQ_AACI	IRQ_VICSOURCE24
 #define IRQ_ETH		IRQ_VICSOURCE25
 #define PIC_MASK	0xFFD00000
+#define PIC_VALID	PIC_VALID_ALL
 #else
 #define IRQ_MMCI0A	IRQ_SIC_MMCI0A
 #define IRQ_AACI	IRQ_SIC_AACI
 #define IRQ_ETH		IRQ_SIC_ETH
 #define PIC_MASK	0
+#define PIC_VALID	PIC_VALID_ALL | BIT(SIC_INT_MMCI0A) | \
+			BIT(SIC_INT_MMCI1A) | BIT(SIC_INT_AACI) | \
+			BIT(SIC_INT_ETH)
 #endif
 
 /* Lookup table for finding a DT node that represents the vic instance */
@@ -102,7 +115,7 @@ void __init versatile_init_irq(void)
 					  VERSATILE_SIC_BASE);
 
 	fpga_irq_init(VA_SIC_BASE, "SIC", IRQ_SIC_START,
-		IRQ_VICSOURCE31, ~PIC_MASK, np);
+		IRQ_VICSOURCE31, PIC_VALID, np);
 
 	/*
 	 * Interrupts on secondary controller from 0 to 8 are routed to
......
@@ -23,6 +23,7 @@
 #include <linux/io.h>
 
 #include <mach/hardware.h>
+#include <mach/irqs.h>
 #include <asm/irq.h>
 #include <asm/mach/pci.h>
@@ -327,12 +328,12 @@ static int __init versatile_map_irq(const struct pci_dev *dev, u8 slot, u8 pin)
 	int irq;
 
 	/* slot,  pin,	irq
-	 *  24     1     27
-	 *  25     1     28
-	 *  26     1     29
-	 *  27     1     30
+	 *  24     1     IRQ_SIC_PCI0
+	 *  25     1     IRQ_SIC_PCI1
+	 *  26     1     IRQ_SIC_PCI2
+	 *  27     1     IRQ_SIC_PCI3
 	 */
-	irq = 27 + ((slot - 24 + pin - 1) & 3);
+	irq = IRQ_SIC_PCI0 + ((slot - 24 + pin - 1) & 3);
 
 	return irq;
 }
......
@@ -6,7 +6,7 @@ obj-y := dma-mapping.o extable.o fault.o init.o \
 				   iomap.o
 
 obj-$(CONFIG_MMU)		+= fault-armv.o flush.o idmap.o ioremap.o \
-				   mmap.o pgd.o mmu.o vmregion.o
+				   mmap.o pgd.o mmu.o
 
 ifneq ($(CONFIG_MMU),y)
 obj-y				+= nommu.o
......
@@ -34,6 +34,9 @@
  * The ASID is used to tag entries in the CPU caches and TLBs.
  * The context ID is used by debuggers and trace logic, and
  * should be unique within all running processes.
+ *
+ * In big endian operation, the two 32 bit words are swapped if accesed by
+ * non 64-bit operations.
  */
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		(ASID_FIRST_VERSION - 1)
......
@@ -39,6 +39,70 @@
 #include <asm/mach/pci.h>
 #include "mm.h"
 
+
+LIST_HEAD(static_vmlist);
+
+static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
+			size_t size, unsigned int mtype)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
+			continue;
+		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
+			continue;
+
+		if (vm->phys_addr > paddr ||
+			paddr + size - 1 > vm->phys_addr + vm->size - 1)
+			continue;
+
+		return svm;
+	}
+
+	return NULL;
+}
+
+struct static_vm *find_static_vm_vaddr(void *vaddr)
+{
+	struct static_vm *svm;
+	struct vm_struct *vm;
+
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
+
+		/* static_vmlist is ascending order */
+		if (vm->addr > vaddr)
+			break;
+
+		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
+			return svm;
+	}
+
+	return NULL;
+}
+
+void __init add_static_vm_early(struct static_vm *svm)
+{
+	struct static_vm *curr_svm;
+	struct vm_struct *vm;
+	void *vaddr;
+
+	vm = &svm->vm;
+	vm_area_add_early(vm);
+	vaddr = vm->addr;
+
+	list_for_each_entry(curr_svm, &static_vmlist, list) {
+		vm = &curr_svm->vm;
+
+		if (vm->addr > vaddr)
+			break;
+	}
+	list_add_tail(&svm->list, &curr_svm->list);
+}
+
 int ioremap_page(unsigned long virt, unsigned long phys,
 		 const struct mem_type *mtype)
 {
@@ -197,13 +261,14 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	const struct mem_type *type;
 	int err;
 	unsigned long addr;
-	struct vm_struct * area;
+	struct vm_struct *area;
+	phys_addr_t paddr = __pfn_to_phys(pfn);
 
 #ifndef CONFIG_ARM_LPAE
 	/*
 	 * High mappings must be supersection aligned
 	 */
-	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
+	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
 		return NULL;
 #endif
@@ -219,24 +284,16 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	/*
 	 * Try to reuse one of the static mapping whenever possible.
 	 */
-	read_lock(&vmlist_lock);
-	for (area = vmlist; area; area = area->next) {
-		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
-			break;
-		if (!(area->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
-			continue;
-		if (__phys_to_pfn(area->phys_addr) > pfn ||
-		    __pfn_to_phys(pfn) + size-1 > area->phys_addr + area->size-1)
-			continue;
-		/* we can drop the lock here as we know *area is static */
-		read_unlock(&vmlist_lock);
-		addr = (unsigned long)area->addr;
-		addr += __pfn_to_phys(pfn) - area->phys_addr;
-		return (void __iomem *) (offset + addr);
+	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
+		struct static_vm *svm;
+
+		svm = find_static_vm_paddr(paddr, size, mtype);
+		if (svm) {
+			addr = (unsigned long)svm->vm.addr;
+			addr += paddr - svm->vm.phys_addr;
+			return (void __iomem *) (offset + addr);
+		}
 	}
-	read_unlock(&vmlist_lock);
 
 	/*
 	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
@@ -248,21 +305,21 @@ void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
 	if (!area)
 		return NULL;
 	addr = (unsigned long)area->addr;
-	area->phys_addr = __pfn_to_phys(pfn);
+	area->phys_addr = paddr;
 
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
 	if (DOMAIN_IO == 0 &&
 	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
 	       cpu_is_xsc3()) && pfn >= 0x100000 &&
-	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
+	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_supersections(addr, pfn, size, type);
-	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
+	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
 		area->flags |= VM_ARM_SECTION_MAPPING;
 		err = remap_area_sections(addr, pfn, size, type);
 	} else
 #endif
-		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
+		err = ioremap_page_range(addr, addr + size, paddr,
 					 __pgprot(type->prot_pte));
 
 	if (err) {
@@ -346,34 +403,28 @@ __arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
 void __iounmap(volatile void __iomem *io_addr)
 {
 	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
-	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	/* If this is a static mapping, we must leave it alone */
+	svm = find_static_vm_vaddr(addr);
+	if (svm)
+		return;
 
-	read_lock(&vmlist_lock);
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (vm->addr > addr)
-			break;
-		if (!(vm->flags & VM_IOREMAP))
-			continue;
-		/* If this is a static mapping we must leave it alone */
-		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
-		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
-			read_unlock(&vmlist_lock);
-			return;
-		}
 #if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
+	{
+		struct vm_struct *vm;
+
+		vm = find_vm_area(addr);
+
 		/*
 		 * If this is a section based mapping we need to handle it
 		 * specially as the VM subsystem does not know how to handle
 		 * such a beast.
 		 */
-		if ((vm->addr == addr) &&
-		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
+		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
 			unmap_area_sections((unsigned long)vm->addr, vm->size);
-			break;
-		}
-#endif
 	}
-	read_unlock(&vmlist_lock);
+#endif
 
 	vunmap(addr);
 }
......
 #ifdef CONFIG_MMU
+#include <linux/list.h>
+#include <linux/vmalloc.h>
 
 /* the upper-most page table pointer */
 extern pmd_t *top_pmd;
@@ -65,6 +67,16 @@ extern void __flush_dcache_page(struct address_space *mapping, struct page *page
 /* consistent regions used by dma_alloc_attrs() */
 #define VM_ARM_DMA_CONSISTENT	0x20000000
 
+struct static_vm {
+	struct vm_struct vm;
+	struct list_head list;
+};
+
+extern struct list_head static_vmlist;
+extern struct static_vm *find_static_vm_vaddr(void *vaddr);
+extern __init void add_static_vm_early(struct static_vm *svm);
+
 #endif
 
 #ifdef CONFIG_ZONE_DMA
......
@@ -757,21 +757,24 @@ void __init iotable_init(struct map_desc *io_desc, int nr)
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	struct static_vm *svm;
 
 	if (!nr)
 		return;
 
-	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+	svm = early_alloc_aligned(sizeof(*svm) * nr, __alignof__(*svm));
 
 	for (md = io_desc; nr; md++, nr--) {
 		create_mapping(md);
+
+		vm = &svm->vm;
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
 		vm->phys_addr = __pfn_to_phys(md->pfn);
 		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		add_static_vm_early(svm++);
 	}
 }
@@ -779,13 +782,16 @@ void __init vm_reserve_area_early(unsigned long addr, unsigned long size,
 				  void *caller)
 {
 	struct vm_struct *vm;
+	struct static_vm *svm;
+
+	svm = early_alloc_aligned(sizeof(*svm), __alignof__(*svm));
 
-	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
+	vm = &svm->vm;
 	vm->addr = (void *)addr;
 	vm->size = size;
 	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
 	vm->caller = caller;
-	vm_area_add_early(vm);
+	add_static_vm_early(svm);
 }
 
 #ifndef CONFIG_ARM_LPAE
@@ -810,14 +816,13 @@ static void __init pmd_empty_section_gap(unsigned long addr)
 
 static void __init fill_pmd_gaps(void)
 {
+	struct static_vm *svm;
 	struct vm_struct *vm;
 	unsigned long addr, next = 0;
 	pmd_t *pmd;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
-			continue;
+	list_for_each_entry(svm, &static_vmlist, list) {
+		vm = &svm->vm;
 		addr = (unsigned long)vm->addr;
 		if (addr < next)
 			continue;
@@ -857,19 +862,12 @@ static void __init fill_pmd_gaps(void)
 #if defined(CONFIG_PCI) && !defined(CONFIG_NEED_MACH_IO_H)
 static void __init pci_reserve_io(void)
 {
-	struct vm_struct *vm;
-	unsigned long addr;
+	struct static_vm *svm;
 
-	/* we're still single threaded hence no lock needed here */
-	for (vm = vmlist; vm; vm = vm->next) {
-		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
-			continue;
-		addr = (unsigned long)vm->addr;
-		addr &= ~(SZ_2M - 1);
-		if (addr == PCI_IO_VIRT_BASE)
-			return;
-	}
+	svm = find_static_vm_vaddr((void *)PCI_IO_VIRT_BASE);
+	if (svm)
+		return;
 
 	vm_reserve_area_early(PCI_IO_VIRT_BASE, SZ_2M, pci_reserve_io);
 }
 #else
......
@@ -38,9 +38,14 @@
 /*
  * mmid - get context id from mm pointer (mm->context.id)
+ * note, this field is 64bit, so in big-endian the two words are swapped too.
  */
 	.macro	mmid, rd, rn
+#ifdef __ARMEB__
+	ldr	\rd, [\rn, #MM_CONTEXT_ID + 4 ]
+#else
 	ldr	\rd, [\rn, #MM_CONTEXT_ID]
+#endif
 	.endm
 
 /*
......
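The macro change encodes the fact that mm->context.id is now a 64-bit field: its low 32 bits sit at byte offset 0 on a little-endian kernel but at byte offset 4 on a big-endian one, which is exactly the offset the big-endian branch adds. The same rule expressed in C, as an illustration only (this helper is not part of the patch):

/* Illustration: picking the low 32 bits of a 64-bit context ID. */
static inline u32 mm_context_id_low32(const u64 *id)
{
#ifdef __ARMEB__
	return ((const u32 *)id)[1];	/* big endian: low word is second */
#else
	return ((const u32 *)id)[0];	/* little endian: low word is first */
#endif
}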
@@ -101,7 +101,7 @@ ENTRY(cpu_v6_dcache_clean_area)
 ENTRY(cpu_v6_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 	mcr	p15, 0, r2, c7, c5, 6		@ flush BTAC/BTB
......
@@ -40,7 +40,7 @@
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
 	mov	r2, #0
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	ALT_SMP(orr	r0, r0, #TTB_FLAGS_SMP)
 	ALT_UP(orr	r0, r0, #TTB_FLAGS_UP)
 #ifdef CONFIG_ARM_ERRATA_430973
......
@@ -47,7 +47,7 @@
  */
 ENTRY(cpu_v7_switch_mm)
 #ifdef CONFIG_MMU
-	ldr	r1, [r1, #MM_CONTEXT_ID]	@ get mm->context.id
+	mmid	r1, r1				@ get mm->context.id
 	and	r3, r1, #0xff
 	mov	r3, r3, lsl #(48 - 32)		@ ASID
 	mcrr	p15, 0, r0, r3, c2		@ set TTB 0
......
#include <linux/fs.h>
#include <linux/spinlock.h>
#include <linux/list.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "vmregion.h"
/*
* VM region handling support.
*
* This should become something generic, handling VM region allocations for
* vmalloc and similar (ioremap, module space, etc).
*
* I envisage vmalloc()'s supporting vm_struct becoming:
*
* struct vm_struct {
* struct vmregion region;
* unsigned long flags;
* struct page **pages;
* unsigned int nr_pages;
* unsigned long phys_addr;
* };
*
* get_vm_area() would then call vmregion_alloc with an appropriate
* struct vmregion head (eg):
*
* struct vmregion vmalloc_head = {
* .vm_list = LIST_HEAD_INIT(vmalloc_head.vm_list),
* .vm_start = VMALLOC_START,
* .vm_end = VMALLOC_END,
* };
*
* However, vmalloc_head.vm_start is variable (typically, it is dependent on
* the amount of RAM found at boot time.) I would imagine that get_vm_area()
* would have to initialise this each time prior to calling vmregion_alloc().
*/
struct arm_vmregion *
arm_vmregion_alloc(struct arm_vmregion_head *head, size_t align,
size_t size, gfp_t gfp, const void *caller)
{
unsigned long start = head->vm_start, addr = head->vm_end;
unsigned long flags;
struct arm_vmregion *c, *new;
if (head->vm_end - head->vm_start < size) {
printk(KERN_WARNING "%s: allocation too big (requested %#x)\n",
__func__, size);
goto out;
}
new = kmalloc(sizeof(struct arm_vmregion), gfp);
if (!new)
goto out;
new->caller = caller;
spin_lock_irqsave(&head->vm_lock, flags);
addr = rounddown(addr - size, align);
list_for_each_entry_reverse(c, &head->vm_list, vm_list) {
if (addr >= c->vm_end)
goto found;
addr = rounddown(c->vm_start - size, align);
if (addr < start)
goto nospc;
}
found:
/*
* Insert this entry after the one we found.
*/
list_add(&new->vm_list, &c->vm_list);
new->vm_start = addr;
new->vm_end = addr + size;
new->vm_active = 1;
spin_unlock_irqrestore(&head->vm_lock, flags);
return new;
nospc:
spin_unlock_irqrestore(&head->vm_lock, flags);
kfree(new);
out:
return NULL;
}
static struct arm_vmregion *__arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
struct arm_vmregion *c;
list_for_each_entry(c, &head->vm_list, vm_list) {
if (c->vm_active && c->vm_start == addr)
goto out;
}
c = NULL;
out:
return c;
}
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *head, unsigned long addr)
{
struct arm_vmregion *c;
unsigned long flags;
spin_lock_irqsave(&head->vm_lock, flags);
c = __arm_vmregion_find(head, addr);
spin_unlock_irqrestore(&head->vm_lock, flags);
return c;
}
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *head, unsigned long addr)
{
struct arm_vmregion *c;
unsigned long flags;
spin_lock_irqsave(&head->vm_lock, flags);
c = __arm_vmregion_find(head, addr);
if (c)
c->vm_active = 0;
spin_unlock_irqrestore(&head->vm_lock, flags);
return c;
}
void arm_vmregion_free(struct arm_vmregion_head *head, struct arm_vmregion *c)
{
unsigned long flags;
spin_lock_irqsave(&head->vm_lock, flags);
list_del(&c->vm_list);
spin_unlock_irqrestore(&head->vm_lock, flags);
kfree(c);
}
#ifdef CONFIG_PROC_FS
static int arm_vmregion_show(struct seq_file *m, void *p)
{
struct arm_vmregion *c = list_entry(p, struct arm_vmregion, vm_list);
seq_printf(m, "0x%08lx-0x%08lx %7lu", c->vm_start, c->vm_end,
c->vm_end - c->vm_start);
if (c->caller)
seq_printf(m, " %pS", (void *)c->caller);
seq_putc(m, '\n');
return 0;
}
static void *arm_vmregion_start(struct seq_file *m, loff_t *pos)
{
struct arm_vmregion_head *h = m->private;
spin_lock_irq(&h->vm_lock);
return seq_list_start(&h->vm_list, *pos);
}
static void *arm_vmregion_next(struct seq_file *m, void *p, loff_t *pos)
{
struct arm_vmregion_head *h = m->private;
return seq_list_next(p, &h->vm_list, pos);
}
static void arm_vmregion_stop(struct seq_file *m, void *p)
{
struct arm_vmregion_head *h = m->private;
spin_unlock_irq(&h->vm_lock);
}
static const struct seq_operations arm_vmregion_ops = {
.start = arm_vmregion_start,
.stop = arm_vmregion_stop,
.next = arm_vmregion_next,
.show = arm_vmregion_show,
};
static int arm_vmregion_open(struct inode *inode, struct file *file)
{
struct arm_vmregion_head *h = PDE(inode)->data;
int ret = seq_open(file, &arm_vmregion_ops);
if (!ret) {
struct seq_file *m = file->private_data;
m->private = h;
}
return ret;
}
static const struct file_operations arm_vmregion_fops = {
.open = arm_vmregion_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,
};
int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
{
proc_create_data(path, S_IRUSR, NULL, &arm_vmregion_fops, h);
return 0;
}
#else
int arm_vmregion_create_proc(const char *path, struct arm_vmregion_head *h)
{
return 0;
}
#endif
#ifndef VMREGION_H
#define VMREGION_H
#include <linux/spinlock.h>
#include <linux/list.h>
struct page;
struct arm_vmregion_head {
spinlock_t vm_lock;
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
};
struct arm_vmregion {
struct list_head vm_list;
unsigned long vm_start;
unsigned long vm_end;
int vm_active;
const void *caller;
};
struct arm_vmregion *arm_vmregion_alloc(struct arm_vmregion_head *, size_t, size_t, gfp_t, const void *);
struct arm_vmregion *arm_vmregion_find(struct arm_vmregion_head *, unsigned long);
struct arm_vmregion *arm_vmregion_find_remove(struct arm_vmregion_head *, unsigned long);
void arm_vmregion_free(struct arm_vmregion_head *, struct arm_vmregion *);
int arm_vmregion_create_proc(const char *, struct arm_vmregion_head *);
#endif
@@ -28,6 +28,8 @@
 #define MCI_ST_UX500_NEG_EDGE	(1 << 13)
 #define MCI_ST_UX500_HWFCEN	(1 << 14)
 #define MCI_ST_UX500_CLK_INV	(1 << 15)
+/* Modified PL180 on Versatile Express platform */
+#define MCI_ARM_HWFCEN		(1 << 12)
 
 #define MMCIARGUMENT		0x008
 #define MMCICOMMAND		0x00c
@@ -193,7 +195,6 @@ struct mmci_host {
 	/* pio stuff */
 	struct sg_mapping_iter	sg_miter;
 	unsigned int		size;
-	struct regulator	*vcc;
 
 	/* pinctrl handles */
 	struct pinctrl		*pinctrl;
......