Commit 3b120ab7 authored by Linus Torvalds

Merge branch 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm

* 'fixes' of http://ftp.arm.linux.org.uk/pub/linux/arm/kernel/git-cur/linux-2.6-arm:
  ARM: 7182/1: ARM cpu topology: fix warning
  ARM: 7181/1: Restrict kprobes probing SWP instructions to ARMv5 and below
  ARM: 7180/1: Change kprobes testcase with unpredictable STRD instruction
  ARM: 7177/1: GIC: avoid skipping non-existent PPIs in irq_start calculation
  ARM: 7176/1: cpu_pm: register GIC PM notifier only once
  ARM: 7175/1: add subname parameter to mfp_set_groupg callers
  ARM: 7174/1: Fix build error in kprobes test code on Thumb2 kernels
  ARM: 7172/1: dma: Drop GFP_COMP for DMA memory allocations
  ARM: 7171/1: unwind: add unwind directives to bitops assembly macros
  ARM: 7170/2: fix compilation breakage in entry-armv.S
  ARM: 7168/1: use cache type functions for arch_get_unmapped_area
  ARM: perf: check that we have a platform device when reserving PMU
  ARM: 7166/1: Use PMD_SHIFT instead of PGDIR_SHIFT in dma-consistent.c
  ARM: 7165/2: PL330: Fix typo in _prepare_ccr()
  ARM: 7163/2: PL330: Only register usable channels
  ARM: 7162/1: errata: tidy up Kconfig options for PL310 errata workarounds
  ARM: 7161/1: errata: no automatic store buffer drain
  ARM: perf: initialise used_mask for fake PMU during validation
  ARM: PMU: remove pmu_init declaration
  ARM: PMU: re-export release_pmu symbol to modules
parents b930c264 4cbd6b16
@@ -1231,7 +1231,7 @@ config ARM_ERRATA_742231
 	  capabilities of the processor.

 config PL310_ERRATA_588369
-	bool "Clean & Invalidate maintenance operations do not invalidate clean lines"
+	bool "PL310 errata: Clean & Invalidate maintenance operations do not invalidate clean lines"
 	depends on CACHE_L2X0
 	help
 	  The PL310 L2 cache controller implements three types of Clean &

@@ -1256,7 +1256,7 @@ config ARM_ERRATA_720789
 	  entries regardless of the ASID.

 config PL310_ERRATA_727915
-	bool "Background Clean & Invalidate by Way operation can cause data corruption"
+	bool "PL310 errata: Background Clean & Invalidate by Way operation can cause data corruption"
 	depends on CACHE_L2X0
 	help
 	  PL310 implements the Clean & Invalidate by Way L2 cache maintenance

@@ -1289,8 +1289,8 @@ config ARM_ERRATA_751472
 	  operation is received by a CPU before the ICIALLUIS has completed,
 	  potentially leading to corrupted entries in the cache or TLB.

-config ARM_ERRATA_753970
-	bool "ARM errata: cache sync operation may be faulty"
+config PL310_ERRATA_753970
+	bool "PL310 errata: cache sync operation may be faulty"
 	depends on CACHE_PL310
 	help
 	  This option enables the workaround for the 753970 PL310 (r3p0) erratum.

@@ -1352,6 +1352,18 @@ config ARM_ERRATA_764369
 	  relevant cache maintenance functions and sets a specific bit
 	  in the diagnostic control register of the SCU.

+config PL310_ERRATA_769419
+	bool "PL310 errata: no automatic Store Buffer drain"
+	depends on CACHE_L2X0
+	help
+	  On revisions of the PL310 prior to r3p2, the Store Buffer does
+	  not automatically drain. This can cause normal, non-cacheable
+	  writes to be retained when the memory system is idle, leading
+	  to suboptimal I/O performance for drivers using coherent DMA.
+	  This option adds a write barrier to the cpu_idle loop so that,
+	  on systems with an outer cache, the store buffer is drained
+	  explicitly.
+
 endmenu

 source "arch/arm/common/Kconfig"
......
@@ -526,7 +526,8 @@ static void __init gic_pm_init(struct gic_chip_data *gic)
 					sizeof(u32));
 	BUG_ON(!gic->saved_ppi_conf);

-	cpu_pm_register_notifier(&gic_notifier_block);
+	if (gic == &gic_data[0])
+		cpu_pm_register_notifier(&gic_notifier_block);
 }
 #else
 static void __init gic_pm_init(struct gic_chip_data *gic)

@@ -581,13 +582,16 @@ void __init gic_init(unsigned int gic_nr, int irq_start,
 	 * For primary GICs, skip over SGIs.
 	 * For secondary GICs, skip over PPIs, too.
 	 */
+	domain->hwirq_base = 32;
 	if (gic_nr == 0) {
 		gic_cpu_base_addr = cpu_base;
-		domain->hwirq_base = 16;
-		if (irq_start > 0)
-			irq_start = (irq_start & ~31) + 16;
-	} else
-		domain->hwirq_base = 32;
+		if ((irq_start & 31) > 0) {
+			domain->hwirq_base = 16;
+			if (irq_start != -1)
+				irq_start = (irq_start & ~31) + 16;
+		}
+	}

 	/*
 	 * Find out how many interrupts are supported.
......
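To make the new hwirq_base/irq_start calculation above concrete, here is a minimal stand-alone sketch of the same logic; the irq_start value of 29 is an assumed example for illustration, not taken from the patch.

#include <stdio.h>

/* Mirrors the updated gic_init() logic from "ARM: 7177/1": hwirq_base
 * defaults to 32 (skip SGIs and PPIs); for the primary GIC with a
 * non-32-aligned irq_start, only the 16 SGIs are skipped and irq_start
 * is rounded so Linux IRQ numbers line up with hwirq 16.
 */
int main(void)
{
	unsigned int gic_nr = 0;	/* primary GIC */
	int irq_start = 29;		/* assumed platform value */
	unsigned int hwirq_base = 32;	/* default: skip SGIs and PPIs */

	if (gic_nr == 0) {
		if ((irq_start & 31) > 0) {
			hwirq_base = 16;	/* skip only the SGIs */
			if (irq_start != -1)
				irq_start = (irq_start & ~31) + 16;
		}
	}

	/* prints "hwirq_base=16 irq_start=16" */
	printf("hwirq_base=%u irq_start=%d\n", hwirq_base, irq_start);
	return 0;
}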
@@ -1211,8 +1211,8 @@ static inline u32 _prepare_ccr(const struct pl330_reqcfg *rqc)
 	ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT);
 	ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT);

-	ccr |= (rqc->dcctl << CC_SRCCCTRL_SHFT);
-	ccr |= (rqc->scctl << CC_DSTCCTRL_SHFT);
+	ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT);
+	ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT);

 	ccr |= (rqc->swap << CC_SWAP_SHFT);

@@ -1623,6 +1623,11 @@ static inline int _alloc_event(struct pl330_thread *thrd)
 	return -1;
 }

+static bool _chan_ns(const struct pl330_info *pi, int i)
+{
+	return pi->pcfg.irq_ns & (1 << i);
+}
+
 /* Upon success, returns IdentityToken for the
  * allocated channel, NULL otherwise.
  */

@@ -1647,7 +1652,8 @@ void *pl330_request_channel(const struct pl330_info *pi)
 	for (i = 0; i < chans; i++) {
 		thrd = &pl330->channels[i];
-		if (thrd->free) {
+		if ((thrd->free) && (!_manager_ns(thrd) ||
+					_chan_ns(pi, i))) {
 			thrd->ev = _alloc_event(thrd);
 			if (thrd->ev >= 0) {
 				thrd->free = false;
......
@@ -55,16 +55,6 @@ reserve_pmu(enum arm_pmu_type type);
 extern void
 release_pmu(enum arm_pmu_type type);

-/**
- * init_pmu() - Initialise the PMU.
- *
- * Initialise the system ready for PMU enabling. This should typically set the
- * IRQ affinity and nothing else. The users (oprofile/perf events etc) will do
- * the actual hardware initialisation.
- */
-extern int
-init_pmu(enum arm_pmu_type type);
-
 #else /* CONFIG_CPU_HAS_PMU */

 #include <linux/err.h>
......
@@ -25,7 +25,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
-const struct cpumask *cpu_coregroup_mask(unsigned int cpu);
+const struct cpumask *cpu_coregroup_mask(int cpu);

 #else
......
@@ -497,7 +497,7 @@ ENDPROC(__und_usr)
 	.popsection
 	.pushsection __ex_table,"a"
 	.long	1b, 4b
-#if __LINUX_ARM_ARCH__ >= 7
+#if CONFIG_ARM_THUMB && __LINUX_ARM_ARCH__ >= 6 && CONFIG_CPU_V7
 	.long	2b, 4b
 	.long	3b, 4b
 #endif
......
@@ -519,10 +519,12 @@ static const union decode_item arm_cccc_0000_____1001_table[] = {
 static const union decode_item arm_cccc_0001_____1001_table[] = {
 	/* Synchronization primitives */

+#if __LINUX_ARM_ARCH__ < 6
+	/* Deprecated on ARMv6 and may be UNDEFINED on v7 */
 	/* SMP/SWPB	cccc 0001 0x00 xxxx xxxx xxxx 1001 xxxx */
 	DECODE_EMULATEX	(0x0fb000f0, 0x01000090, emulate_rd12rn16rm0_rwflags_nopc,
 						 REGS(NOPC, NOPC, 0, 0, NOPC)),
+#endif
 	/* LDREX/STREX{,D,B,H}	cccc 0001 1xxx xxxx xxxx xxxx 1001 xxxx */
 	/* And unallocated instructions... */
 	DECODE_END
......
@@ -427,18 +427,25 @@ void kprobe_arm_test_cases(void)
 	TEST_GROUP("Synchronization primitives")

-	/*
-	 * Use hard coded constants for SWP instructions to avoid warnings
-	 * about deprecated instructions.
-	 */
-	TEST_RP( ".word 0xe108e097 @ swp lr, r",7,VAL2,", [r",8,0,"]")
-	TEST_R(  ".word 0x610d0091 @ swpvs r0, r",1,VAL1,", [sp]")
-	TEST_RP( ".word 0xe10cd09e @ swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#if __LINUX_ARM_ARCH__ < 6
+	TEST_RP("swp lr, r",7,VAL2,", [r",8,0,"]")
+	TEST_R( "swpvs r0, r",1,VAL1,", [sp]")
+	TEST_RP("swp sp, r",14,VAL2,", [r",12,13*4,"]")
+#else
+	TEST_UNSUPPORTED(".word 0xe108e097 @ swp lr, r7, [r8]")
+	TEST_UNSUPPORTED(".word 0x610d0091 @ swpvs r0, r1, [sp]")
+	TEST_UNSUPPORTED(".word 0xe10cd09e @ swp sp, r14 [r12]")
+#endif
 	TEST_UNSUPPORTED(".word 0xe102f091 @ swp pc, r1, [r2]")
 	TEST_UNSUPPORTED(".word 0xe102009f @ swp r0, pc, [r2]")
 	TEST_UNSUPPORTED(".word 0xe10f0091 @ swp r0, r1, [pc]")
-	TEST_RP( ".word 0xe148e097 @ swpb lr, r",7,VAL2,", [r",8,0,"]")
-	TEST_R(  ".word 0x614d0091 @ swpvsb r0, r",1,VAL1,", [sp]")
+#if __LINUX_ARM_ARCH__ < 6
+	TEST_RP("swpb lr, r",7,VAL2,", [r",8,0,"]")
+	TEST_R( "swpvsb r0, r",1,VAL1,", [sp]")
+#else
+	TEST_UNSUPPORTED(".word 0xe148e097 @ swpb lr, r7, [r8]")
+	TEST_UNSUPPORTED(".word 0x614d0091 @ swpvsb r0, r1, [sp]")
+#endif
 	TEST_UNSUPPORTED(".word 0xe142f091 @ swpb pc, r1, [r2]")

 	TEST_UNSUPPORTED(".word 0xe1100090") /* Unallocated space */

@@ -550,7 +557,7 @@ void kprobe_arm_test_cases(void)
 	TEST_RPR( "strccd r",8, VAL2,", [r",13,0, ", r",12,48,"]")
 	TEST_RPR( "strd r",4, VAL1,", [r",2, 24,", r",3, 48,"]!")
 	TEST_RPR( "strcsd r",12,VAL2,", [r",11,48,", -r",10,24,"]!")
-	TEST_RPR( "strd r",2, VAL1,", [r",3, 24,"], r",4,48,"")
+	TEST_RPR( "strd r",2, VAL1,", [r",5, 24,"], r",4,48,"")
 	TEST_RPR( "strd r",10,VAL2,", [r",9, 48,"], -r",7,24,"")
 	TEST_UNSUPPORTED(".word 0xe1afc0fa @ strd r12, [pc, r10]!")
......
@@ -222,8 +222,8 @@ void kprobe_thumb16_test_cases(void)
 DONT_TEST_IN_ITBLOCK(
 	TEST_BF_R( "cbnz r",0,0, ", 2f")
 	TEST_BF_R( "cbz r",2,-1,", 2f")
-	TEST_BF_RX( "cbnz r",4,1, ", 2f",0x20)
-	TEST_BF_RX( "cbz r",7,0, ", 2f",0x40)
+	TEST_BF_RX( "cbnz r",4,1, ", 2f", SPACE_0x20)
+	TEST_BF_RX( "cbz r",7,0, ", 2f", SPACE_0x40)
 )
 	TEST_R("sxth r0, r",7, HH1,"")
 	TEST_R("sxth r7, r",0, HH2,"")

@@ -246,7 +246,7 @@ DONT_TEST_IN_ITBLOCK(
 	TESTCASE_START(code)		\
 	TEST_ARG_PTR(13, offset)	\
 	TEST_ARG_END("")		\
-	TEST_BRANCH_F(code,0)		\
+	TEST_BRANCH_F(code)		\
 	TESTCASE_END

 	TEST("push {r0}")

@@ -319,8 +319,8 @@ CONDITION_INSTRUCTIONS(8,
 	TEST_BF(  "b 2f")
 	TEST_BB(  "b 2b")
-	TEST_BF_X("b 2f", 0x400)
-	TEST_BB_X("b 2b", 0x400)
+	TEST_BF_X("b 2f", SPACE_0x400)
+	TEST_BB_X("b 2b", SPACE_0x400)

 	TEST_GROUP("Testing instructions in IT blocks")

@@ -746,7 +746,7 @@ CONDITION_INSTRUCTIONS(22,
 	TEST_BB("bne.w 2b")
 	TEST_BF("bgt.w 2f")
 	TEST_BB("blt.w 2b")
-	TEST_BF_X("bpl.w 2f",0x1000)
+	TEST_BF_X("bpl.w 2f", SPACE_0x1000)
 )

 	TEST_UNSUPPORTED("msr cpsr, r0")

@@ -786,11 +786,11 @@ CONDITION_INSTRUCTIONS(22,
 	TEST_BF(  "b.w 2f")
 	TEST_BB(  "b.w 2b")
-	TEST_BF_X("b.w 2f", 0x1000)
+	TEST_BF_X("b.w 2f", SPACE_0x1000)

 	TEST_BF(  "bl.w 2f")
 	TEST_BB(  "bl.w 2b")
-	TEST_BB_X("bl.w 2b", 0x1000)
+	TEST_BB_X("bl.w 2b", SPACE_0x1000)

 	TEST_X(	"blx __dummy_arm_subroutine",
 	".arm				\n\t"
......
@@ -149,23 +149,31 @@ struct test_arg_end {
 	"1:	"instruction"		\n\t"	\
 	"	nop			\n\t"

-#define TEST_BRANCH_F(instruction, xtra_dist)	\
+#define TEST_BRANCH_F(instruction)		\
 	TEST_INSTRUCTION(instruction)		\
-	".if "#xtra_dist"		\n\t"	\
 	"	b	99f		\n\t"	\
-	".space "#xtra_dist"		\n\t"	\
-	".endif				\n\t"	\
+	"2:	nop			\n\t"
+
+#define TEST_BRANCH_B(instruction)		\
+	"	b	50f		\n\t"	\
+	"	b	99f		\n\t"	\
+	"2:	nop			\n\t"	\
+	"	b	99f		\n\t"	\
+	TEST_INSTRUCTION(instruction)
+
+#define TEST_BRANCH_FX(instruction, codex)	\
+	TEST_INSTRUCTION(instruction)		\
+	"	b	99f		\n\t"	\
+	codex"				\n\t"	\
 	"	b	99f		\n\t"	\
 	"2:	nop			\n\t"

-#define TEST_BRANCH_B(instruction, xtra_dist)	\
+#define TEST_BRANCH_BX(instruction, codex)	\
 	"	b	50f		\n\t"	\
 	"	b	99f		\n\t"	\
 	"2:	nop			\n\t"	\
 	"	b	99f		\n\t"	\
-	".if "#xtra_dist"		\n\t"	\
-	".space "#xtra_dist"		\n\t"	\
-	".endif				\n\t"	\
+	codex"				\n\t"	\
 	TEST_INSTRUCTION(instruction)

 #define TESTCASE_END	\

@@ -301,47 +309,60 @@ struct test_arg_end {
 	TESTCASE_START(code1 #reg1 code2)	\
 	TEST_ARG_PTR(reg1, val1)		\
 	TEST_ARG_END("")			\
-	TEST_BRANCH_F(code1 #reg1 code2, 0)	\
+	TEST_BRANCH_F(code1 #reg1 code2)	\
 	TESTCASE_END

-#define TEST_BF_X(code, xtra_dist)		\
+#define TEST_BF(code)				\
 	TESTCASE_START(code)			\
 	TEST_ARG_END("")			\
-	TEST_BRANCH_F(code, xtra_dist)		\
+	TEST_BRANCH_F(code)			\
 	TESTCASE_END

-#define TEST_BB_X(code, xtra_dist)		\
+#define TEST_BB(code)				\
 	TESTCASE_START(code)			\
 	TEST_ARG_END("")			\
-	TEST_BRANCH_B(code, xtra_dist)		\
+	TEST_BRANCH_B(code)			\
 	TESTCASE_END

-#define TEST_BF_RX(code1, reg, val, code2, xtra_dist)	\
+#define TEST_BF_R(code1, reg, val, code2)	\
 	TESTCASE_START(code1 #reg code2)	\
 	TEST_ARG_REG(reg, val)			\
 	TEST_ARG_END("")			\
-	TEST_BRANCH_F(code1 #reg code2, xtra_dist)	\
+	TEST_BRANCH_F(code1 #reg code2)		\
 	TESTCASE_END

-#define TEST_BB_RX(code1, reg, val, code2, xtra_dist)	\
+#define TEST_BB_R(code1, reg, val, code2)	\
 	TESTCASE_START(code1 #reg code2)	\
 	TEST_ARG_REG(reg, val)			\
 	TEST_ARG_END("")			\
-	TEST_BRANCH_B(code1 #reg code2, xtra_dist)	\
+	TEST_BRANCH_B(code1 #reg code2)		\
 	TESTCASE_END

-#define TEST_BF(code)	TEST_BF_X(code, 0)
-#define TEST_BB(code)	TEST_BB_X(code, 0)
-#define TEST_BF_R(code1, reg, val, code2)	TEST_BF_RX(code1, reg, val, code2, 0)
-#define TEST_BB_R(code1, reg, val, code2)	TEST_BB_RX(code1, reg, val, code2, 0)
-
 #define TEST_BF_RR(code1, reg1, val1, code2, reg2, val2, code3)	\
 	TESTCASE_START(code1 #reg1 code2 #reg2 code3)	\
 	TEST_ARG_REG(reg1, val1)		\
 	TEST_ARG_REG(reg2, val2)		\
 	TEST_ARG_END("")			\
-	TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3, 0)	\
+	TEST_BRANCH_F(code1 #reg1 code2 #reg2 code3)	\
 	TESTCASE_END
+
+#define TEST_BF_X(code, codex)			\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_FX(code, codex)		\
+	TESTCASE_END
+
+#define TEST_BB_X(code, codex)			\
+	TESTCASE_START(code)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_BX(code, codex)		\
+	TESTCASE_END
+
+#define TEST_BF_RX(code1, reg, val, code2, codex)	\
+	TESTCASE_START(code1 #reg code2)	\
+	TEST_ARG_REG(reg, val)			\
+	TEST_ARG_END("")			\
+	TEST_BRANCH_FX(code1 #reg code2, codex)	\
+	TESTCASE_END

 #define TEST_X(code, codex)	\

@@ -372,6 +393,25 @@ struct test_arg_end {
 	TESTCASE_END

+/*
+ * Macros for defining space directives spread over multiple lines.
+ * These are required so the compiler guesses better the length of inline asm
+ * code and will spill the literal pool early enough to avoid generating PC
+ * relative loads with out of range offsets.
+ */
+#define TWICE(x)	x x
+#define SPACE_0x8	TWICE(".space 4\n\t")
+#define SPACE_0x10	TWICE(SPACE_0x8)
+#define SPACE_0x20	TWICE(SPACE_0x10)
+#define SPACE_0x40	TWICE(SPACE_0x20)
+#define SPACE_0x80	TWICE(SPACE_0x40)
+#define SPACE_0x100	TWICE(SPACE_0x80)
+#define SPACE_0x200	TWICE(SPACE_0x100)
+#define SPACE_0x400	TWICE(SPACE_0x200)
+#define SPACE_0x800	TWICE(SPACE_0x400)
+#define SPACE_0x1000	TWICE(SPACE_0x800)
+
 /* Various values used in test cases... */
 #define N(val)	(val ^ 0xffffffff)
 #define VAL1	0x12345678
......
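Each TWICE() above doubles its argument, so every SPACE_* macro emits exactly its named number of padding bytes as a chain of ".space 4" directives; the test cases then pass them to TEST_BF_X()/TEST_BF_RX() so the compiler can account for the extra distance. A small stand-alone illustration of the doubling (hypothetical, not part of the patch):

#include <stdio.h>
#include <string.h>

/* Same doubling scheme as the patch: SPACE_0x20 expands to eight
 * ".space 4\n\t" string literals, i.e. 0x20 bytes of assembler padding.
 */
#define TWICE(x)	x x
#define SPACE_0x8	TWICE(".space 4\n\t")
#define SPACE_0x10	TWICE(SPACE_0x8)
#define SPACE_0x20	TWICE(SPACE_0x10)

int main(void)
{
	const char *s = SPACE_0x20;	/* adjacent literals concatenate */
	int directives = 0;

	for (const char *p = s; (p = strstr(p, ".space 4")) != NULL; p++)
		directives++;

	/* prints "8 directives, 0x20 bytes" */
	printf("%d directives, 0x%x bytes\n", directives, directives * 4);
	return 0;
}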
@@ -343,8 +343,14 @@ validate_group(struct perf_event *event)
 {
 	struct perf_event *sibling, *leader = event->group_leader;
 	struct pmu_hw_events fake_pmu;
+	DECLARE_BITMAP(fake_used_mask, ARMPMU_MAX_HWEVENTS);

-	memset(&fake_pmu, 0, sizeof(fake_pmu));
+	/*
+	 * Initialise the fake PMU. We only need to populate the
+	 * used_mask for the purposes of validation.
+	 */
+	memset(fake_used_mask, 0, sizeof(fake_used_mask));
+	fake_pmu.used_mask = fake_used_mask;

 	if (!validate_event(&fake_pmu, leader))
 		return -ENOSPC;

@@ -396,6 +402,9 @@ armpmu_reserve_hardware(struct arm_pmu *armpmu)
 	int i, err, irq, irqs;
 	struct platform_device *pmu_device = armpmu->plat_device;

+	if (!pmu_device)
+		return -ENODEV;
+
 	err = reserve_pmu(armpmu->type);
 	if (err) {
 		pr_warning("unable to reserve pmu\n");
......
@@ -33,3 +33,4 @@ release_pmu(enum arm_pmu_type type)
 {
 	clear_bit_unlock(type, pmu_lock);
 }
+EXPORT_SYMBOL_GPL(release_pmu);
@@ -192,6 +192,9 @@ void cpu_idle(void)
 #endif

 			local_irq_disable();
+#ifdef CONFIG_PL310_ERRATA_769419
+			wmb();
+#endif
 			if (hlt_counter) {
 				local_irq_enable();
 				cpu_relax();
......
@@ -43,7 +43,7 @@
 struct cputopo_arm cpu_topology[NR_CPUS];

-const struct cpumask *cpu_coregroup_mask(unsigned int cpu)
+const struct cpumask *cpu_coregroup_mask(int cpu)
 {
 	return &cpu_topology[cpu].core_sibling;
 }
......
+#include <asm/unwind.h>
+
 #if __LINUX_ARM_ARCH__ >= 6
-	.macro	bitop, instr
+	.macro	bitop, name, instr
+ENTRY(	\name		)
+UNWIND(	.fnstart	)
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1

@@ -13,9 +17,13 @@
 	cmp	r0, #0
 	bne	1b
 	bx	lr
+UNWIND(	.fnend		)
+ENDPROC(\name		)
 	.endm

-	.macro	testop, instr, store
+	.macro	testop, name, instr, store
+ENTRY(	\name		)
+UNWIND(	.fnstart	)
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
 	mov	r2, #1

@@ -34,9 +42,13 @@
 	cmp	r0, #0
 	movne	r0, #1
 2:	bx	lr
+UNWIND(	.fnend		)
+ENDPROC(\name		)
 	.endm
 #else
-	.macro	bitop, instr
+	.macro	bitop, name, instr
+ENTRY(	\name		)
+UNWIND(	.fnstart	)
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
 	and	r2, r0, #31

@@ -49,6 +61,8 @@
 	str	r2, [r1, r0, lsl #2]
 	restore_irqs ip
 	mov	pc, lr
+UNWIND(	.fnend		)
+ENDPROC(\name		)
 	.endm

 /**

@@ -59,7 +73,9 @@
  * Note: we can trivially conditionalise the store instruction
  * to avoid dirtying the data cache.
  */
-	.macro	testop, instr, store
+	.macro	testop, name, instr, store
+ENTRY(	\name		)
+UNWIND(	.fnstart	)
 	ands	ip, r1, #3
 	strneb	r1, [ip]		@ assert word-aligned
 	and	r3, r0, #31

@@ -73,5 +89,7 @@
 	moveq	r0, #0
 	restore_irqs ip
 	mov	pc, lr
+UNWIND(	.fnend		)
+ENDPROC(\name		)
 	.endm
 #endif
@@ -12,6 +12,4 @@
 #include "bitops.h"
 		.text

-ENTRY(_change_bit)
-	bitop	eor
-ENDPROC(_change_bit)
+bitop	_change_bit, eor

@@ -12,6 +12,4 @@
 #include "bitops.h"
 		.text

-ENTRY(_clear_bit)
-	bitop	bic
-ENDPROC(_clear_bit)
+bitop	_clear_bit, bic

@@ -12,6 +12,4 @@
 #include "bitops.h"
 		.text

-ENTRY(_set_bit)
-	bitop	orr
-ENDPROC(_set_bit)
+bitop	_set_bit, orr

@@ -12,6 +12,4 @@
 #include "bitops.h"
 		.text

-ENTRY(_test_and_change_bit)
-	testop	eor, str
-ENDPROC(_test_and_change_bit)
+testop	_test_and_change_bit, eor, str

@@ -12,6 +12,4 @@
 #include "bitops.h"
 		.text

-ENTRY(_test_and_clear_bit)
-	testop	bicne, strne
-ENDPROC(_test_and_clear_bit)
+testop	_test_and_clear_bit, bicne, strne

@@ -12,6 +12,4 @@
 #include "bitops.h"
 		.text

-ENTRY(_test_and_set_bit)
-	testop	orreq, streq
-ENDPROC(_test_and_set_bit)
+testop	_test_and_set_bit, orreq, streq
@@ -61,7 +61,7 @@ static inline void cache_sync(void)
 {
 	void __iomem *base = l2x0_base;

-#ifdef CONFIG_ARM_ERRATA_753970
+#ifdef CONFIG_PL310_ERRATA_753970
 	/* write to an unmmapped register */
 	writel_relaxed(0, base + L2X0_DUMMY_REG);
 #else
......
@@ -168,7 +168,7 @@ static int __init consistent_init(void)
 	pte_t *pte;
 	int i = 0;
 	unsigned long base = consistent_base;
-	unsigned long num_ptes = (CONSISTENT_END - base) >> PGDIR_SHIFT;
+	unsigned long num_ptes = (CONSISTENT_END - base) >> PMD_SHIFT;

 	consistent_pte = kmalloc(num_ptes * sizeof(pte_t), GFP_KERNEL);
 	if (!consistent_pte) {

@@ -332,6 +332,15 @@ __dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
 	struct page *page;
 	void *addr;

+	/*
+	 * Following is a work-around (a.k.a. hack) to prevent pages
+	 * with __GFP_COMP being passed to split_page() which cannot
+	 * handle them. The real problem is that this flag probably
+	 * should be 0 on ARM as it is not supported on this
+	 * platform; see CONFIG_HUGETLBFS.
+	 */
+	gfp &= ~(__GFP_COMP);
+
 	*handle = ~0;
 	size = PAGE_ALIGN(size);
......
@@ -9,8 +9,7 @@
 #include <linux/io.h>
 #include <linux/personality.h>
 #include <linux/random.h>
-#include <asm/cputype.h>
-#include <asm/system.h>
+#include <asm/cachetype.h>

 #define COLOUR_ALIGN(addr,pgoff)		\
 	((((addr)+SHMLBA-1)&~(SHMLBA-1)) +	\

@@ -32,25 +31,15 @@ arch_get_unmapped_area(struct file *filp, unsigned long addr,
 	struct mm_struct *mm = current->mm;
 	struct vm_area_struct *vma;
 	unsigned long start_addr;
-#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
-	unsigned int cache_type;
-	int do_align = 0, aliasing = 0;
+	int do_align = 0;
+	int aliasing = cache_is_vipt_aliasing();

 	/*
 	 * We only need to do colour alignment if either the I or D
-	 * caches alias. This is indicated by bits 9 and 21 of the
-	 * cache type register.
+	 * caches alias.
 	 */
-	cache_type = read_cpuid_cachetype();
-	if (cache_type != read_cpuid_id()) {
-		aliasing = (cache_type | cache_type >> 12) & (1 << 11);
-		if (aliasing)
-			do_align = filp || flags & MAP_SHARED;
-	}
-#else
-#define do_align 0
-#define aliasing 0
-#endif
+	if (aliasing)
+		do_align = filp || (flags & MAP_SHARED);

 	/*
 	 * We enforce the MAP_FIXED case.
......
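The colour alignment that this hunk decides to apply comes from the COLOUR_ALIGN() macro at the top of the file, which is only partially visible above. A small worked example of how such colouring behaves; the SHMLBA value of four pages and the second half of the macro body are assumptions based on the usual ARM definitions, not taken from this hunk:

#include <stdio.h>

#define PAGE_SHIFT	12
#define SHMLBA		(4UL << PAGE_SHIFT)	/* assumption: 4-page colouring (16KiB) */

/* Assumed full form of the macro truncated in the hunk above: round addr up
 * to an SHMLBA boundary, then add the cache-colour offset implied by pgoff.
 */
#define COLOUR_ALIGN(addr, pgoff)			\
	((((addr) + SHMLBA - 1) & ~(SHMLBA - 1)) +	\
	 (((pgoff) << PAGE_SHIFT) & (SHMLBA - 1)))

int main(void)
{
	unsigned long addr = 0x40001234, pgoff = 3;

	/* prints 0x40007000: 16KiB-aligned base plus colour 3 (three pages in),
	 * so shared mappings of the same file offset share a cache colour.
	 */
	printf("0x%lx\n", COLOUR_ALIGN(addr, pgoff));
	return 0;
}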
@@ -593,7 +593,7 @@ static int __devinit nuc900_i2c_probe(struct platform_device *pdev)
 	i2c->adap.algo_data = i2c;
 	i2c->adap.dev.parent = &pdev->dev;

-	mfp_set_groupg(&pdev->dev);
+	mfp_set_groupg(&pdev->dev, NULL);

 	clk_get_rate(i2c->clk);
......
@@ -426,7 +426,7 @@ static int __devinit nuc900_spi_probe(struct platform_device *pdev)
 		goto err_clk;
 	}

-	mfp_set_groupg(&pdev->dev);
+	mfp_set_groupg(&pdev->dev, NULL);

 	nuc900_init_spi(hw);

 	err = spi_bitbang_start(&hw->bitbang);
......
@@ -365,7 +365,8 @@ static int __devinit nuc900_ac97_drvprobe(struct platform_device *pdev)
 	if (ret)
 		goto out3;

-	mfp_set_groupg(nuc900_audio->dev); /* enbale ac97 multifunction pin*/
+	/* enbale ac97 multifunction pin */
+	mfp_set_groupg(nuc900_audio->dev, "nuc900-audio");

 	return 0;
......