Commit 77c2019f authored by Paul Mundt

sh: initial PMB mapping iteration by helper macro.

All of the cached/uncached mapping setup is duplicated for each mapping size,
and also misses the 16MB case. Rather than duplicating the same iteration
code yet again for that, consolidate it into a helper macro that builds an
iteration block for each size. The 16MB case is then trivially bolted on at
the end.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 35f99c0d
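
The consolidation rests on standard C preprocessor token pasting: "##" splices
the size into both the loop label (.L##size) and the size-flag constant
(PMB_SZ_##size##M), so a single macro body can stamp out the 512/128/64/16MB
variants. A minimal C sketch of the same pattern, assuming nothing beyond the
greedy chunking behaviour visible in the diff below (all identifiers here are
illustrative, not the kernel's):

#include <stdio.h>

/*
 * Stamp out one greedy-fill helper per chunk size, the way
 * __PMB_ITER_BY_SIZE stamps out one assembly block per size.
 */
#define ITER_BY_SIZE(size)                                              \
static unsigned long iter_##size(unsigned long remain, unsigned *nr)    \
{                                                                       \
        const unsigned long chunk = (unsigned long)(size) << 20;        \
                                                                        \
        while (remain >= chunk) {                                       \
                remain -= chunk;                                        \
                *nr += 2;       /* one cached + one uncached entry */   \
        }                                                               \
        return remain;                                                  \
}

ITER_BY_SIZE(512)
ITER_BY_SIZE(128)
ITER_BY_SIZE(64)
ITER_BY_SIZE(16)        /* the case the open-coded version missed */

int main(void)
{
        unsigned long remain = 208UL << 20;     /* say, 208MB of memory */
        unsigned nr = 0;

        remain = iter_512(remain, &nr);
        remain = iter_128(remain, &nr);
        remain = iter_64(remain, &nr);
        remain = iter_16(remain, &nr);

        /* 208MB = 128MB + 64MB + 16MB: 3 chunks, 6 PMB entries */
        printf("entries=%u leftover=%lu\n", nr, remain);
        return 0;
}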
@@ -86,6 +86,62 @@ ENTRY(_stext)
#endif
#if defined(CONFIG_PMB) && !defined(CONFIG_PMB_LEGACY)
#define __PMB_ITER_BY_SIZE(size)                                \
.L##size:                                                       \
        mov     #(size >> 4), r6;                               \
        shll16  r6;                                             \
        shll8   r6;     /* r6 = size in bytes (size << 20) */   \
                                                                \
        /* Fall through to the next size once the remaining     \
         * amount of memory (r5) is smaller than this chunk. */ \
        cmp/hi  r5, r6;                                         \
        bt      9999f;                                          \
                                                                \
        /* r9 = PMB size field for a size MB mapping */         \
        mov     #(PMB_SZ_##size##M >> 2), r9;                   \
        shll2   r9;                                             \
                                                                \
        /*                                                      \
         * Cached mapping                                       \
         */                                                     \
        mov     #PMB_C, r8;                                     \
        or      r0, r8;                                         \
        or      r9, r8;                                         \
        mov.l   r8, @r1;                                        \
        mov.l   r2, @r3;                                        \
                                                                \
        /* Increment to the next PMB_DATA entry */              \
        add     r4, r1;                                         \
        /* Increment to the next PMB_ADDR entry */              \
        add     r4, r3;                                         \
        /* Increment number of PMB entries */                   \
        add     #1, r10;                                        \
                                                                \
        /*                                                      \
         * Uncached mapping                                     \
         */                                                     \
        mov     #(PMB_UB >> 8), r8;                             \
        shll8   r8;                                             \
                                                                \
        or      r0, r8;                                         \
        or      r9, r8;                                         \
        mov.l   r8, @r1;                                        \
        mov     r2, r8;                                         \
        add     r7, r8;                                         \
        mov.l   r8, @r3;                                        \
                                                                \
        /* Increment to the next PMB_DATA entry */              \
        add     r4, r1;                                         \
        /* Increment to the next PMB_ADDR entry */              \
        add     r4, r3;                                         \
        /* Increment number of PMB entries */                   \
        add     #1, r10;                                        \
                                                                \
        /* Account for the mapped chunk and advance the bases */ \
        sub     r6, r5;                                         \
        add     r6, r0;                                         \
        add     r6, r2;                                         \
                                                                \
        bra     .L##size;                                       \
9999:
        /*
         * Reconfigure the initial PMB mappings setup by the hardware.
         *
@@ -142,154 +198,11 @@ ENTRY(_stext)
         * r9 = scratch register
         * r10 = number of PMB entries we've set up
         */
.L512:
        mov     #(512 >> 4), r6
        shll16  r6
        shll8   r6
        cmp/hi  r5, r6
        bt      .L128
        mov     #(PMB_SZ_512M >> 2), r9
        shll2   r9
        /*
         * Cached mapping
         */
        mov     #PMB_C, r8
        or      r0, r8
        or      r9, r8
        mov.l   r8, @r1
        mov.l   r2, @r3
        add     r4, r1          /* Increment to the next PMB_DATA entry */
        add     r4, r3          /* Increment to the next PMB_ADDR entry */
        add     #1, r10         /* Increment number of PMB entries */
        /*
         * Uncached mapping
         */
        mov     #(PMB_UB >> 8), r8
        shll8   r8
        or      r0, r8
        or      r9, r8
        mov.l   r8, @r1
        mov     r2, r8
        add     r7, r8
        mov.l   r8, @r3
        add     r4, r1          /* Increment to the next PMB_DATA entry */
        add     r4, r3          /* Increment to the next PMB_ADDR entry */
        add     #1, r10         /* Increment number of PMB entries */
        sub     r6, r5
        add     r6, r0
        add     r6, r2
        bra     .L512
.L128:
        mov     #(128 >> 4), r6
        shll16  r6
        shll8   r6
        cmp/hi  r5, r6
        bt      .L64
        mov     #(PMB_SZ_128M >> 2), r9
        shll2   r9
        /*
         * Cached mapping
         */
        mov     #PMB_C, r8
        or      r0, r8
        or      r9, r8
        mov.l   r8, @r1
        mov.l   r2, @r3
        add     r4, r1          /* Increment to the next PMB_DATA entry */
        add     r4, r3          /* Increment to the next PMB_ADDR entry */
        add     #1, r10         /* Increment number of PMB entries */
        /*
         * Uncached mapping
         */
        mov     #(PMB_UB >> 8), r8
        shll8   r8
        or      r0, r8
        or      r9, r8
        mov.l   r8, @r1
        mov     r2, r8
        add     r7, r8
        mov.l   r8, @r3
        add     r4, r1          /* Increment to the next PMB_DATA entry */
        add     r4, r3          /* Increment to the next PMB_ADDR entry */
        add     #1, r10         /* Increment number of PMB entries */
        sub     r6, r5
        add     r6, r0
        add     r6, r2
        bra     .L128
.L64:
        mov     #(64 >> 4), r6
        shll16  r6
        shll8   r6
        cmp/hi  r5, r6
        bt      .Ldone
        mov     #(PMB_SZ_64M >> 2), r9
        shll2   r9
        /*
         * Cached mapping
         */
        mov     #PMB_C, r8
        or      r0, r8
        or      r9, r8
        mov.l   r8, @r1
        mov.l   r2, @r3
        add     r4, r1          /* Increment to the next PMB_DATA entry */
        add     r4, r3          /* Increment to the next PMB_ADDR entry */
        add     #1, r10         /* Increment number of PMB entries */
        /*
         * Uncached mapping
         */
        mov     #(PMB_UB >> 8), r8
        shll8   r8
        or      r0, r8
        or      r9, r8
        mov.l   r8, @r1
        mov     r2, r8
        add     r7, r8
        mov.l   r8, @r3
        add     r4, r1          /* Increment to the next PMB_DATA entry */
        add     r4, r3          /* Increment to the next PMB_ADDR entry */
        add     #1, r10         /* Increment number of PMB entries */
        sub     r6, r5
        add     r6, r0
        add     r6, r2
        bra     .L64
__PMB_ITER_BY_SIZE(512)
__PMB_ITER_BY_SIZE(128)
__PMB_ITER_BY_SIZE(64)
__PMB_ITER_BY_SIZE(16)
.Ldone:
        /* Update cached_to_uncached */
        mov.l   .Lcached_to_uncached, r0
        mov.l   r7, @r0
......
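
Read as a whole, the loop is a greedy fill: r5 holds the amount of memory
still to be mapped, and each __PMB_ITER_BY_SIZE invocation peels off as many
chunks of its size as fit, writing a cached entry (PMB_C) and an uncached
shadow entry (PMB_UB, at the virtual offset held in r7) for every chunk
before falling through to the next smaller size. A sketch of that logic in C,
assuming this reading of the registers (r0/r2 as the PMB data/addr value
bases, r10 as the entry count); the flag and address values below are
illustrative stand-ins, not taken from the SH headers:

#include <stdint.h>
#include <stdio.h>

#define PMB_C   0x08u           /* illustrative: cacheable bit */
#define PMB_UB  0x200u          /* illustrative: unbuffered/uncached bit */

struct pmb_entry { uint32_t data, addr; };

/* One __PMB_ITER_BY_SIZE invocation, modelled as a function. */
static unsigned pmb_iter_by_size(uint32_t size_mb, uint32_t sz_flags,
                                 uint32_t *ppn, uint32_t *vpn,
                                 uint32_t *remain, uint32_t uncached_off,
                                 struct pmb_entry *e, unsigned nr)
{
        const uint32_t chunk = size_mb << 20;

        while (*remain >= chunk) {
                /* Cached mapping */
                e[nr].data = *ppn | sz_flags | PMB_C;
                e[nr].addr = *vpn;
                nr++;

                /* Uncached mapping: same physical chunk, shadowed at
                 * the cached-to-uncached offset (r7 in the assembly). */
                e[nr].data = *ppn | sz_flags | PMB_UB;
                e[nr].addr = *vpn + uncached_off;
                nr++;

                *remain -= chunk;
                *ppn += chunk;
                *vpn += chunk;
        }
        return nr;
}

int main(void)
{
        struct pmb_entry e[16];
        uint32_t ppn = 0x08000000, vpn = 0x80000000;    /* illustrative */
        uint32_t remain = 80u << 20, off = 0x20000000;  /* illustrative */
        unsigned nr = 0;

        /* Descending sizes, as in the invocations above; the second
         * argument stands in for the per-size PMB size field. */
        nr = pmb_iter_by_size(64, 0x10, &ppn, &vpn, &remain, off, e, nr);
        nr = pmb_iter_by_size(16, 0x00, &ppn, &vpn, &remain, off, e, nr);

        printf("%u entries, %u bytes left unmapped\n", nr, (unsigned)remain);
        return 0;
}

Running the sizes largest-first consumes big mappings before small ones, so
the entry count (r10) stays low; the trailing 16MB pass is exactly what the
pre-macro code lacked.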