Commit 1ae98561 authored by Dave Martin, committed by Nicolas Pitre

ARM: mcpm_head.S: vlock-based first man election

Instead of requiring the first man to be elected in advance (which
can be suboptimal in some situations), this patch uses a
per-cluster mutex to coordinate selection of the first man.

This should also make it more feasible to reuse this code path for
asynchronous cluster resume (as in CPUidle scenarios).

We must ensure that the vlock data doesn't share a cacheline with
anything else, or dirty cache eviction could corrupt it.
Signed-off-by: Dave Martin <dave.martin@linaro.org>
Signed-off-by: Nicolas Pitre <nicolas.pitre@linaro.org>
Reviewed-by: Santosh Shilimkar <santosh.shilimkar@ti.com>
Reviewed-by: Will Deacon <will.deacon@arm.com>
parent 9762f12d
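
For orientation before the diff: vlock_trylock and vlock_unlock come from the
companion vlock.S, a voting mutex built from plain byte loads/stores and DMB
barriers so that it works before caches and exclusive accesses are usable.
Below is a rough C sketch of that idea only, not the kernel's code: the struct
layout, the NR_CPUS bound, and the dmb() helper are invented for illustration,
and the assembly in vlock.S remains the authority.

	#define NR_CPUS			4	/* illustrative per-cluster CPU bound */
	#define VLOCK_OWNER_NONE	0	/* assumed "no owner yet" sentinel */

	struct vlock {
		volatile unsigned char owner;		/* elected CPU + 1, or none */
		volatile unsigned char voting[NR_CPUS];	/* per-CPU "voting now" flags */
	};

	static inline void dmb(void)
	{
		asm volatile("dmb" ::: "memory");
	}

	/* Returns 0 if this CPU won first man election, nonzero otherwise. */
	static int vlock_trylock(struct vlock *v, unsigned int cpu)
	{
		v->voting[cpu] = 1;			/* announce candidacy */
		dmb();
		if (v->owner != VLOCK_OWNER_NONE) {	/* already decided: lose */
			v->voting[cpu] = 0;
			return 1;
		}
		v->owner = cpu + 1;			/* cast our vote */
		dmb();
		v->voting[cpu] = 0;			/* done voting */
		dmb();
		for (unsigned int i = 0; i < NR_CPUS; i++)
			while (v->voting[i])		/* wait out concurrent voters */
				;
		dmb();
		return v->owner != cpu + 1;		/* 0 only if our vote stuck */
	}

	static void vlock_unlock(struct vlock *v)
	{
		dmb();					/* order lock-protected writes */
		v->owner = VLOCK_OWNER_NONE;
	}

mcpm_entry_point calls the real primitive with r0 = the per-cluster lock
address and r1 = the CPU number, exactly as the new hunks below show.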
arch/arm/common/Makefile
@@ -11,4 +11,4 @@ obj-$(CONFIG_SHARP_PARAM)	+= sharpsl_param.o
 obj-$(CONFIG_SHARP_SCOOP)	+= scoop.o
 obj-$(CONFIG_PCI_HOST_ITE8152)	+= it8152.o
 obj-$(CONFIG_ARM_TIMER_SP804)	+= timer-sp.o
-obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o
+obj-$(CONFIG_MCPM)		+= mcpm_head.o mcpm_entry.o vlock.o
arch/arm/common/mcpm_head.S
@@ -16,6 +16,8 @@
 #include <linux/linkage.h>
 #include <asm/mcpm.h>
 
+#include "vlock.h"
+
 .if MCPM_SYNC_CLUSTER_CPUS
 .error "cpus must be the first member of struct mcpm_sync_struct"
 .endif
@@ -69,10 +71,11 @@ ENTRY(mcpm_entry_point)
 	 * position independent way.
 	 */
 	adr	r5, 3f
-	ldmia	r5, {r6, r7, r8}
+	ldmia	r5, {r6, r7, r8, r11}
 	add	r6, r5, r6		@ r6 = mcpm_entry_vectors
 	ldr	r7, [r5, r7]		@ r7 = mcpm_power_up_setup_phys
 	add	r8, r5, r8		@ r8 = mcpm_sync
+	add	r11, r5, r11		@ r11 = first_man_locks
 
 	mov	r0, #MCPM_SYNC_CLUSTER_SIZE
 	mla	r8, r0, r10, r8		@ r8 = sync cluster base
@@ -86,13 +89,22 @@ ENTRY(mcpm_entry_point)
 	@ At this point, the cluster cannot unexpectedly enter the GOING_DOWN
 	@ state, because there is at least one active CPU (this CPU).
 
-	@ Note: the following is racy as another CPU might be testing
-	@ the same flag at the same moment.  That'll be fixed later.
+	mov	r0, #VLOCK_SIZE
+	mla	r11, r0, r10, r11	@ r11 = cluster first man lock
+	mov	r0, r11
+	mov	r1, r9			@ cpu
+	bl	vlock_trylock		@ implies DMB
+
+	cmp	r0, #0			@ failed to get the lock?
+	bne	mcpm_setup_wait		@ wait for cluster setup if so
+
 	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
 	cmp	r0, #CLUSTER_UP		@ cluster already up?
 	bne	mcpm_setup		@ if not, set up the cluster
 
-	@ Otherwise, skip setup:
+	@ Otherwise, release the first man lock and skip setup:
+	mov	r0, r11
+	bl	vlock_unlock
 	b	mcpm_setup_complete
 
 mcpm_setup:
@@ -142,6 +154,19 @@ mcpm_setup_leave:
 	dsb
 	sev
 
+	mov	r0, r11
+	bl	vlock_unlock	@ implies DMB
+	b	mcpm_setup_complete
+
+	@ In the contended case, non-first men wait here for cluster setup
+	@ to complete:
+mcpm_setup_wait:
+	ldrb	r0, [r8, #MCPM_SYNC_CLUSTER_CLUSTER]
+	cmp	r0, #CLUSTER_UP
+	wfene
+	bne	mcpm_setup_wait
+	dmb
+
 mcpm_setup_complete:
 	@ If a platform-specific CPU setup hook is needed, it is
 	@ called from here.
@@ -173,11 +198,17 @@ mcpm_entry_gated:
 3:	.word	mcpm_entry_vectors - .
 	.word	mcpm_power_up_setup_phys - 3b
 	.word	mcpm_sync - 3b
+	.word	first_man_locks - 3b
 
 ENDPROC(mcpm_entry_point)
 
 	.bss
 
-	.align	5
+	.align	CACHE_WRITEBACK_ORDER
+	.type	first_man_locks, #object
+first_man_locks:
+	.space	VLOCK_SIZE * MAX_NR_CLUSTERS
+
+	.align	CACHE_WRITEBACK_ORDER
 	.type	mcpm_entry_vectors, #object
 ENTRY(mcpm_entry_vectors)
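
To spell out the handshake the two hunks above create: the first man performs
cluster setup, publishes CLUSTER_UP, executes the pre-existing dsb; sev in
mcpm_setup_leave, and releases the lock; every loser branches to
mcpm_setup_wait and parks in WFE until that SEV. A loose C equivalent of the
loser's loop (the function name is invented; the real code is the handful of
new instructions above):

	static void wait_for_first_man(volatile unsigned char *cluster_state)
	{
		while (*cluster_state != CLUSTER_UP)	/* first man stores CLUSTER_UP */
			asm volatile("wfe");		/* doze until its SEV */
		asm volatile("dmb" ::: "memory");	/* observe its setup stores */
	}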
arch/arm/kernel/asm-offsets.c
@@ -150,6 +150,7 @@ int main(void)
   DEFINE(DMA_TO_DEVICE,		DMA_TO_DEVICE);
   DEFINE(DMA_FROM_DEVICE,	DMA_FROM_DEVICE);
   BLANK();
+  DEFINE(CACHE_WRITEBACK_ORDER, __CACHE_WRITEBACK_ORDER);
   DEFINE(CACHE_WRITEBACK_GRANULE, __CACHE_WRITEBACK_GRANULE);
   BLANK();
 #ifdef CONFIG_KVM_ARM_HOST
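
This asm-offsets.c hunk is what lets mcpm_head.S say .align
CACHE_WRITEBACK_ORDER: the constant is exported from C so the assembler can
place first_man_locks on a cache writeback granule boundary, and the second
.align pads out the tail so nothing else shares that granule, addressing the
commit message's dirty-eviction concern. A C analogue of the same layout rule,
reusing the illustrative struct vlock from the earlier sketch (in C the
trailing padding would also have to be arranged explicitly, which the second
.align handles in assembly):

	/* Keep the locks alone in their writeback granule so a dirty line
	 * evicted from an unrelated object can never land on top of lock
	 * state while a CPU is still running with its cache disabled. */
	static struct vlock first_man_locks[MAX_NR_CLUSTERS]
		__attribute__((aligned(1 << __CACHE_WRITEBACK_ORDER)));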