Commit 76551468 authored by Eugeniy Paltsev, committed by Vineet Gupta

ARCv2: Add explicit unaligned access support (and ability to disable too)

As of today we enable unaligned access unconditionally on ARCv2.
Do this under a Kconfig option to allow disabling it for testing,
benchmarking etc. Also while at it:

  - Select HAVE_EFFICIENT_UNALIGNED_ACCESS
  - Although gcc defaults to unaligned access (since GNU 2018.03), add the
    right toggles for enabling or disabling as appropriate
  - Update the bootlog to print both the HW feature status (exists,
    enabled/disabled) and the SW status (used / not used)
  - Wire up the relaxed memcpy for unaligned access
Signed-off-by: Eugeniy Paltsev <Eugeniy.Paltsev@synopsys.com>
Signed-off-by: Vineet Gupta <vgupta@synopsys.com>
[vgupta: squashed patches, handle gcc -mno-unaligned-access quirk]
parent 4d1e7918
@@ -386,6 +386,15 @@ config ARC_HAS_SWAPE
 
 if ISA_ARCV2
 
+config ARC_USE_UNALIGNED_MEM_ACCESS
+	bool "Enable unaligned access in HW"
+	default y
+	select HAVE_EFFICIENT_UNALIGNED_ACCESS
+	help
+	  The ARC HS architecture supports unaligned memory access
+	  which is disabled by default. Enable unaligned access in
+	  hardware and allow software to use it
+
 config ARC_HAS_LL64
 	bool "Insn: 64bit LDD/STD"
 	help
......
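Aside: selecting HAVE_EFFICIENT_UNALIGNED_ACCESS tells generic kernel code that unaligned loads/stores are cheap, so get_unaligned()-style helpers can use one plain load instead of assembling the value byte by byte. A minimal sketch of the two strategies (illustrative names, little-endian byte order as on typical ARC parts; not the kernel's actual implementation):

#include <stdint.h>
#include <string.h>

/* Fast path: compiler may emit one (possibly unaligned) 32-bit load */
static inline uint32_t load32_direct(const void *p)
{
	uint32_t v;

	memcpy(&v, p, sizeof(v));	/* folds to a single load when legal */
	return v;
}

/* Fallback path: byte-wise assembly, safe on strict-alignment hardware */
static inline uint32_t load32_bytewise(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}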
@@ -28,6 +28,12 @@ cflags-$(CONFIG_ARC_HAS_SWAPE)	+= -mswape
 
 ifdef CONFIG_ISA_ARCV2
 
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+cflags-y				+= -munaligned-access
+else
+cflags-y				+= -mno-unaligned-access
+endif
+
 ifndef CONFIG_ARC_HAS_LL64
 cflags-y				+= -mno-ll64
 endif
......
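The -munaligned-access / -mno-unaligned-access toggles control whether gcc itself may emit unaligned memory operations, e.g. when accessing packed struct fields. A hedged example of code whose generated instructions differ under the two flags:

#include <stdint.h>

struct __attribute__((packed)) hdr {
	uint8_t  tag;
	uint32_t len;		/* offset 1: inherently misaligned */
};

uint32_t read_len(const struct hdr *h)
{
	/* With -munaligned-access gcc can use a single 32-bit load here;
	 * with -mno-unaligned-access it must fall back to byte accesses. */
	return h->len;
}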
@@ -82,6 +82,7 @@
 #define ECR_V_DTLB_MISS			0x05
 #define ECR_V_PROTV			0x06
 #define ECR_V_TRAP			0x09
+#define ECR_V_MISALIGN			0x0d
 #endif
 
 /* DTLB Miss and Protection Violation Cause Codes */
......
@@ -44,7 +44,13 @@
 #define ARCV2_IRQ_DEF_PRIO	1
 
 /* seed value for status register */
-#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | STATUS_AD_MASK | \
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+#define __AD_ENB	STATUS_AD_MASK
+#else
+#define __AD_ENB	0
+#endif
+
+#define ISA_INIT_STATUS_BITS	(STATUS_IE_MASK | __AD_ENB | \
 					(ARCV2_IRQ_DEF_PRIO << 1))
 
 #ifndef __ASSEMBLY__
......
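For reference, the seed value can be worked out from the bit positions in irqflags-arcv2.h. A small worked example, assuming IE at bit 31 and AD at bit 19 (positions taken from that header):

#include <stdio.h>

#define STATUS_AD_BIT	19	/* AD set => HW accepts unaligned access */
#define STATUS_IE_BIT	31	/* IE set => interrupts enabled */

int main(void)
{
	unsigned int prio = 1;	/* ARCV2_IRQ_DEF_PRIO */

	/* CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS=y: AD included in the seed */
	printf("0x%08x\n", (1u << STATUS_IE_BIT) | (1u << STATUS_AD_BIT) |
			   (prio << 1));		/* 0x80080002 */

	/* =n: __AD_ENB is 0, AD stays clear, misaligned accesses trap */
	printf("0x%08x\n", (1u << STATUS_IE_BIT) | (prio << 1));
							/* 0x80000002 */
	return 0;
}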
@@ -54,7 +54,12 @@
 	; gcc 7.3.1 (ARC GNU 2018.03) onwards generates unaligned access
 	; by default
 	lr	r5, [status32]
+#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
 	bset	r5, r5, STATUS_AD_BIT
+#else
+	; Although disabled at reset, bootloader might have enabled it
+	bclr	r5, r5, STATUS_AD_BIT
+#endif
 	kflag	r5
 #endif
 .endm
......
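In C terms the assembly fragment above amounts to the following (kernel-style fragment mirroring the kflag-based STATUS32 update used in arc_init_IRQ(); an illustrative rendering, not actual kernel code):

	unsigned int tmp = read_aux_reg(ARC_REG_STATUS32);

#ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
	tmp |= STATUS_AD_MASK;		/* bset: allow unaligned access */
#else
	/* AD is clear at reset, but a bootloader may have set it */
	tmp &= ~STATUS_AD_MASK;		/* bclr: enforce strict alignment */
#endif
	/* STATUS32 cannot be written with a plain SR; KFLAG is required */
	asm volatile("kflag	%0	\n" : : "r"(tmp));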
@@ -95,7 +95,7 @@ void arc_init_IRQ(void)
 
 	/* setup status32, don't enable intr yet as kernel doesn't want */
 	tmp = read_aux_reg(ARC_REG_STATUS32);
-	tmp |= STATUS_AD_MASK | (ARCV2_IRQ_DEF_PRIO << 1);
+	tmp |= ARCV2_IRQ_DEF_PRIO << 1;
 	tmp &= ~STATUS_IE_MASK;
 	asm volatile("kflag	%0	\n"::"r"(tmp));
 }
......
@@ -263,7 +263,7 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 {
 	struct cpuinfo_arc *cpu = &cpuinfo_arc700[cpu_id];
 	struct bcr_identity *core = &cpu->core;
-	int i, n = 0, ua = 0;
+	int n = 0;
 
 	FIX_PTR(cpu);
@@ -283,16 +283,23 @@ static char *arc_cpu_mumbojumbo(int cpu_id, char *buf, int len)
 		       IS_AVAIL2(cpu->extn.rtc, "RTC [UP 64-bit] ", CONFIG_ARC_TIMERS_64BIT),
 		       IS_AVAIL2(cpu->extn.gfrc, "GFRC [SMP 64-bit] ", CONFIG_ARC_TIMERS_64BIT));
 
-#ifdef __ARC_UNALIGNED__
-	ua = 1;
+	n += scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
+		       IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
+		       IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
+		       IS_AVAIL2(cpu->isa.unalign, "unalign ", CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS));
+
+#if defined(__ARC_UNALIGNED__) && !defined(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS)
+	/*
+	 * gcc 7.3.1 (GNU 2018.03) onwards generates unaligned access by default
+	 * but -mno-unaligned-access to disable that didn't work until gcc 8.2.1
+	 * (GNU 2019.03). So landing here implies the interim period, when
+	 * despite Kconfig being off, gcc is generating unaligned accesses which
+	 * could bomb later on. So better to disallow such broken builds.
+	 */
+	BUILD_BUG_ON_MSG(1, "gcc doesn't support -mno-unaligned-access");
 #endif
 
-	n += i = scnprintf(buf + n, len - n, "%s%s%s%s%s%s",
-		       IS_AVAIL2(cpu->isa.atomic, "atomic ", CONFIG_ARC_HAS_LLSC),
-		       IS_AVAIL2(cpu->isa.ldd, "ll64 ", CONFIG_ARC_HAS_LL64),
-		       IS_AVAIL1(cpu->isa.unalign, "unalign "), IS_USED_RUN(ua));
-
-	if (i)
-		n += scnprintf(buf + n, len - n, "\n\t\t: ");
+	n += scnprintf(buf + n, len - n, "\n\t\t: ");
 
 	if (cpu->extn_mpy.ver) {
 		if (cpu->extn_mpy.ver <= 0x2) {	/* ARCompact */
......
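BUILD_BUG_ON_MSG() (from <linux/build_bug.h>) turns this bad toolchain/config combination into a compile-time error rather than a runtime crash. A userspace analog of the guard above, using C11 _Static_assert (illustrative only):

/* Compiler emits unaligned accesses (__ARC_UNALIGNED__ defined) although
 * the kernel config forbids them: the gcc in use predates working
 * -mno-unaligned-access support (gcc 8.2.1 / GNU 2019.03). */
#if defined(__ARC_UNALIGNED__) && !defined(CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS)
_Static_assert(0, "gcc doesn't support -mno-unaligned-access");
#endif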
@@ -145,7 +145,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
 	} else if (vec == ECR_V_PROTV) {
 		if (cause_code == ECR_C_PROTV_INST_FETCH)
 			pr_cont("Execute from Non-exec Page\n");
-		else if (cause_code == ECR_C_PROTV_MISALIG_DATA)
+		else if (cause_code == ECR_C_PROTV_MISALIG_DATA &&
+			 IS_ENABLED(CONFIG_ISA_ARCOMPACT))
 			pr_cont("Misaligned r/w from 0x%08lx\n", address);
 		else
 			pr_cont("%s access not allowed on page\n",
@@ -161,6 +162,8 @@ static void show_ecr_verbose(struct pt_regs *regs)
 			pr_cont("Bus Error from Data Mem\n");
 		else
 			pr_cont("Bus Error, check PRM\n");
+	} else if (vec == ECR_V_MISALIGN) {
+		pr_cont("Misaligned r/w from 0x%08lx\n", address);
 #endif
 	} else if (vec == ECR_V_TRAP) {
 		if (regs->ecr_param == 5)
......
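show_ecr_verbose() dispatches on fields of the Exception Cause Register; the vector number sits in ECR[23:16] and the cause code in ECR[15:8] (layout assumed from the kernel's pt_regs bitfields). A sketch of the decode that now recognizes the ARCv2 misalignment vector:

#include <stdio.h>

#define ECR_V_MISALIGN	0x0d	/* vector added by this patch */

void decode_ecr(unsigned long ecr, unsigned long address)
{
	unsigned int vec = (ecr >> 16) & 0xff;	/* exception vector */

	/* raised by ARCv2 when STATUS32.AD is clear and an unaligned
	 * load/store is attempted */
	if (vec == ECR_V_MISALIGN)
		printf("Misaligned r/w from 0x%08lx\n", address);
}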
@@ -8,4 +8,10 @@
 lib-y	:= strchr-700.o strcpy-700.o strlen.o memcmp.o
 
 lib-$(CONFIG_ISA_ARCOMPACT)	+= memcpy-700.o memset.o strcmp.o
-lib-$(CONFIG_ISA_ARCV2)		+= memcpy-archs.o memset-archs.o strcmp-archs.o
+lib-$(CONFIG_ISA_ARCV2)		+= memset-archs.o strcmp-archs.o
+
+ifdef CONFIG_ARC_USE_UNALIGNED_MEM_ACCESS
+lib-$(CONFIG_ISA_ARCV2)		+= memcpy-archs-unaligned.o
+else
+lib-$(CONFIG_ISA_ARCV2)		+= memcpy-archs.o
+endif
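memcpy-archs-unaligned.S itself is hand-written assembly; the idea behind the "relaxed" memcpy is simply to copy in word-sized chunks without aligning the pointers first, since the hardware now tolerates any alignment. A C sketch of that idea (not the actual routine):

#include <stddef.h>
#include <stdint.h>
#include <string.h>

void *memcpy_relaxed_sketch(void *dst, const void *src, size_t n)
{
	uint8_t *d = dst;
	const uint8_t *s = src;

	/* word-sized chunks regardless of the pointers' alignment */
	while (n >= 4) {
		uint32_t w;

		memcpy(&w, s, 4);	/* one (possibly unaligned) load  */
		memcpy(d, &w, 4);	/* one (possibly unaligned) store */
		s += 4; d += 4; n -= 4;
	}
	while (n--)
		*d++ = *s++;
	return dst;
}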