Commit 87e29cac authored by Paul Mundt

sh: Use L1_CACHE_BYTES for .data.cacheline_aligned.

Previously this was using a hardcoded 32; use L1_CACHE_BYTES for
cacheline alignment instead.
Signed-off-by: Paul Mundt <lethal@linux-sh.org>
parent 5c36e657
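
For context: L1_CACHE_BYTES comes from <asm/cache.h>, which derives the line size from the configured cache-line shift instead of hard-wiring 32, so parts with a different line size get the correct alignment. A minimal sketch of that derivation follows; the Kconfig symbol name is an assumption here, not quoted from this commit.

/* Sketch of the definition in <asm/cache.h>; symbol name assumed. */
#define L1_CACHE_SHIFT	CONFIG_SH_L1_CACHE_SHIFT	/* e.g. 5 */
#define L1_CACHE_BYTES	(1 << L1_CACHE_SHIFT)		/* e.g. 32 bytes */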
@@ -3,6 +3,7 @@
  * Written by Niibe Yutaka
  */
 #include <asm/thread_info.h>
+#include <asm/cache.h>
 #include <asm-generic/vmlinux.lds.h>
 #ifdef CONFIG_CPU_LITTLE_ENDIAN
@@ -53,7 +54,7 @@ SECTIONS
   . = ALIGN(PAGE_SIZE);
   .data.page_aligned : { *(.data.page_aligned) }
-  . = ALIGN(32);
+  . = ALIGN(L1_CACHE_BYTES);
   __per_cpu_start = .;
   .data.percpu : { *(.data.percpu) }
   __per_cpu_end = .;
@@ -21,6 +21,7 @@
 #define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))
+#ifndef __ASSEMBLY__
 struct cache_info {
 	unsigned int ways;		/* Number of cache ways */
 	unsigned int sets;		/* Number of cache sets */
@@ -47,6 +48,6 @@ struct cache_info {
 	unsigned long flags;
 };
+#endif /* __ASSEMBLY__ */
 #endif /* __KERNEL__ */
 #endif /* __ASM_SH_CACHE_H */
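
The #ifndef __ASSEMBLY__ guard added above is what keeps <asm/cache.h> safe to include from the linker script: the script source is run through the C preprocessor before the linker sees it, so it can consume macros such as L1_CACHE_BYTES but not C declarations like struct cache_info. The L1_CACHE_ALIGN() helper visible in the context lines rounds a size up to the next line boundary; a standalone sketch of its behaviour, with the line size hard-wired to 32 purely for illustration:

#include <stdio.h>

#define L1_CACHE_BYTES	32	/* illustrative value only */
#define L1_CACHE_ALIGN(x)	(((x)+(L1_CACHE_BYTES-1))&~(L1_CACHE_BYTES-1))

int main(void)
{
	/* 33 bytes rounds up to the next 32-byte cache line: 64 */
	printf("L1_CACHE_ALIGN(33) = %d\n", L1_CACHE_ALIGN(33));
	return 0;
}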