Commit d50e071f authored by Robin Murphy, committed by Catalin Marinas

arm64: Implement pmem API support

Add a clean-to-point-of-persistence cache maintenance helper, and wire
up the basic architectural support for the pmem driver based on it.
Reviewed-by: Will Deacon <will.deacon@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
[catalin.marinas@arm.com: move arch_*_pmem() functions to arch/arm64/mm/flush.c]
[catalin.marinas@arm.com: change dmb(sy) to dmb(osh)]
Signed-off-by: Catalin Marinas <catalin.marinas@arm.com>
parent e1bc5d1b
arch/arm64/Kconfig
@@ -960,6 +960,17 @@ config ARM64_UAO
 	  regular load/store instructions if the cpu does not implement the
 	  feature.
 
+config ARM64_PMEM
+	bool "Enable support for persistent memory"
+	select ARCH_HAS_PMEM_API
+	help
+	  Say Y to enable support for the persistent memory API based on the
+	  ARMv8.2 DCPoP feature.
+
+	  The feature is detected at runtime, and the kernel will use DC CVAC
+	  operations if DC CVAP is not supported (following the behaviour of
+	  DC CVAP itself if the system does not define a point of persistence).
+
 endmenu
 
 config ARM64_MODULE_CMODEL_LARGE
arch/arm64/include/asm/assembler.h
@@ -352,6 +352,12 @@ alternative_if_not ARM64_WORKAROUND_CLEAN_CACHE
 	dc	\op, \kaddr
 alternative_else
 	dc	civac, \kaddr
+alternative_endif
+	.elseif	(\op == cvap)
+alternative_if ARM64_HAS_DCPOP
+	sys	3, c7, c12, 1, \kaddr	// dc cvap
+alternative_else
+	dc	cvac, \kaddr
 alternative_endif
 	.else
 	dc	\op, \kaddr
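
For readers unfamiliar with the arm64 alternatives framework: the hunk above patches the clean instruction used by dcache_by_line_op at boot, depending on whether the ARM64_HAS_DCPOP capability was detected. The sketch below is illustrative only (clean_line_to_pop() is a made-up helper, not part of this patch) and expresses the same selection as a runtime branch, whereas the real code rewrites the instruction in place.

#include <asm/cpufeature.h>

/*
 * Illustrative sketch: clean one D-cache line to the Point of Persistence,
 * falling back to a clean to the Point of Coherency when DC CVAP is not
 * implemented. The patch above achieves this with a boot-time instruction
 * alternative instead of a per-line branch.
 */
static inline void clean_line_to_pop(void *addr)
{
	if (cpus_have_const_cap(ARM64_HAS_DCPOP))
		/* DC CVAP, emitted through its SYS encoding for older assemblers */
		asm volatile("sys #3, c7, c12, #1, %0" : : "r" (addr) : "memory");
	else
		asm volatile("dc cvac, %0" : : "r" (addr) : "memory");
}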
arch/arm64/include/asm/cacheflush.h
@@ -69,6 +69,7 @@ extern void flush_icache_range(unsigned long start, unsigned long end);
 extern void __flush_dcache_area(void *addr, size_t len);
 extern void __inval_dcache_area(void *addr, size_t len);
 extern void __clean_dcache_area_poc(void *addr, size_t len);
+extern void __clean_dcache_area_pop(void *addr, size_t len);
 extern void __clean_dcache_area_pou(void *addr, size_t len);
 extern long __flush_cache_user_range(unsigned long start, unsigned long end);
 extern void sync_icache_aliases(void *kaddr, unsigned long len);
arch/arm64/include/asm/cpucaps.h
@@ -39,7 +39,8 @@
 #define ARM64_WORKAROUND_QCOM_FALKOR_E1003	18
 #define ARM64_WORKAROUND_858921			19
 #define ARM64_WORKAROUND_CAVIUM_30115		20
+#define ARM64_HAS_DCPOP				21
 
-#define ARM64_NCAPS				21
+#define ARM64_NCAPS				22
 
 #endif /* __ASM_CPUCAPS_H */
arch/arm64/kernel/cpufeature.c
@@ -889,6 +889,17 @@ static const struct arm64_cpu_capabilities arm64_features[] = {
 		.min_field_value = 0,
 		.matches = has_no_fpsimd,
 	},
+#ifdef CONFIG_ARM64_PMEM
+	{
+		.desc = "Data cache clean to Point of Persistence",
+		.capability = ARM64_HAS_DCPOP,
+		.def_scope = SCOPE_SYSTEM,
+		.matches = has_cpuid_feature,
+		.sys_reg = SYS_ID_AA64ISAR1_EL1,
+		.field_pos = ID_AA64ISAR1_DPB_SHIFT,
+		.min_field_value = 1,
+	},
+#endif
 	{},
 };
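
The capability entry above boils down to a CPUID field check: support for DC CVAP is advertised in the DPB field (bits [3:0]) of ID_AA64ISAR1_EL1, and a value of 1 or greater means the instruction is implemented. A minimal, illustrative sketch of that check follows; cpu_has_dcpop() is a made-up name, and the real code goes through has_cpuid_feature() and the system-wide sanitised register view rather than reading the register on one CPU.

#include <linux/types.h>
#include <asm/sysreg.h>

/* Illustrative only: test the 4-bit DPB field of ID_AA64ISAR1_EL1. */
static bool cpu_has_dcpop(void)
{
	u64 isar1 = read_sysreg_s(SYS_ID_AA64ISAR1_EL1);

	return ((isar1 >> ID_AA64ISAR1_DPB_SHIFT) & 0xf) >= 1;
}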
arch/arm64/mm/cache.S
@@ -171,6 +171,20 @@ __dma_clean_area:
 ENDPIPROC(__clean_dcache_area_poc)
 ENDPROC(__dma_clean_area)
 
+/*
+ *	__clean_dcache_area_pop(kaddr, size)
+ *
+ *	Ensure that any D-cache lines for the interval [kaddr, kaddr+size)
+ *	are cleaned to the PoP.
+ *
+ *	- kaddr   - kernel address
+ *	- size    - size in question
+ */
+ENTRY(__clean_dcache_area_pop)
+	dcache_by_line_op cvap, sy, x0, x1, x2, x3
+	ret
+ENDPIPROC(__clean_dcache_area_pop)
+
 /*
  *	__dma_flush_area(start, size)
  *
arch/arm64/mm/flush.c
@@ -83,3 +83,19 @@ EXPORT_SYMBOL(flush_dcache_page);
  * Additional functions defined in assembly.
  */
 EXPORT_SYMBOL(flush_icache_range);
+
+#ifdef CONFIG_ARCH_HAS_PMEM_API
+static inline void arch_wb_cache_pmem(void *addr, size_t size)
+{
+	/* Ensure order against any prior non-cacheable writes */
+	dmb(osh);
+	__clean_dcache_area_pop(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
+
+static inline void arch_invalidate_pmem(void *addr, size_t size)
+{
+	__inval_dcache_area(addr, size);
+}
+EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
+#endif
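
With these two hooks in place (and ARCH_HAS_PMEM_API selected by the new Kconfig option), generic code can make CPU stores to persistent memory durable. The sketch below is a hedged usage example, not part of this commit: pmem_write_record() and its arguments are invented for illustration, the include assumes the generic arch_wb_cache_pmem() prototype is visible via linux/libnvdimm.h, and real drivers normally reach these hooks through the pmem/libnvdimm and DAX layers rather than calling them directly.

#include <linux/string.h>
#include <linux/libnvdimm.h>

/*
 * Illustrative sketch: copy a record into an existing kernel mapping of
 * persistent memory, then clean the written range to the Point of
 * Persistence (or Point of Coherency on CPUs without DC CVAP) so the data
 * can survive a power loss before completion is reported.
 */
static void pmem_write_record(void *pmem_dst, const void *buf, size_t len)
{
	memcpy(pmem_dst, buf, len);		/* data may still sit in the D-cache */
	arch_wb_cache_pmem(pmem_dst, len);	/* clean it out to persistence */
}

Note that arch_wb_cache_pmem() begins with dmb(osh) so that earlier writes made through non-cacheable (e.g. write-combining) mappings are ordered before the clean; the outer-shareable barrier is sufficient for that and lighter than the full-system dmb(sy) the patch originally used, which is the change noted in the commit trailer.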