Commit 293bccc5 authored by Linus Torvalds

Merge tag 'nds32-for-linus-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux

Pull nds32 updates from Greentime Hu:
 "Bug fixes and build ixes for nds32"

* tag 'nds32-for-linus-4.18' of git://git.kernel.org/pub/scm/linux/kernel/git/greentime/linux:
  nds32: fix build error "relocation truncated to fit: R_NDS32_25_PCREL_RELA" when make allyesconfig
  nds32: To simplify the implementation of update_mmu_cache()
  nds32: Fix the dts pointer is not passed correctly issue.
  nds32: To implement these icache invalidation APIs since nds32 cores don't snoop data cache. This issue is found by Guo Ren. Based on the Documentation/core-api/cachetlb.rst and it says:
  nds32: Fix build error caused by configuration flag rename
  nds32: define __NDS32_E[BL]__ for sparse
parents bfa54a3a 98755389
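
The icache entry in the shortlog above refers to the rule in Documentation/core-api/cachetlb.rst: whenever the kernel stores instructions through the data cache, it must flush the instruction cache for that range before executing them, because cores such as nds32 do not snoop the dcache. A minimal sketch of a caller (the patch_insn() helper and the patching scenario are hypothetical, not part of this merge):

	#include <linux/types.h>
	#include <asm/cacheflush.h>

	/* Hypothetical caller: the store lands in the dcache only, so the
	 * icache must be invalidated before the new instruction can be
	 * fetched on a non-snooping core such as nds32. */
	static void patch_insn(u32 *addr, u32 new_insn)
	{
		*addr = new_insn;
		flush_icache_range((unsigned long)addr,
				   (unsigned long)addr + sizeof(new_insn));
	}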
--- a/arch/nds32/Kconfig
+++ b/arch/nds32/Kconfig
@@ -12,17 +12,17 @@ config NDS32
 	select CLONE_BACKWARDS
 	select COMMON_CLK
 	select DMA_NONCOHERENT_OPS
-	select GENERIC_ASHLDI3
-	select GENERIC_ASHRDI3
-	select GENERIC_LSHRDI3
-	select GENERIC_CMPDI2
-	select GENERIC_MULDI3
-	select GENERIC_UCMPDI2
 	select GENERIC_ATOMIC64
 	select GENERIC_CPU_DEVICES
 	select GENERIC_CLOCKEVENTS
 	select GENERIC_IRQ_CHIP
 	select GENERIC_IRQ_SHOW
+	select GENERIC_LIB_ASHLDI3
+	select GENERIC_LIB_ASHRDI3
+	select GENERIC_LIB_CMPDI2
+	select GENERIC_LIB_LSHRDI3
+	select GENERIC_LIB_MULDI3
+	select GENERIC_LIB_UCMPDI2
 	select GENERIC_STRNCPY_FROM_USER
 	select GENERIC_STRNLEN_USER
 	select GENERIC_TIME_VSYSCALL
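
The Kconfig hunk tracks the rename of the generic libgcc-helper options to GENERIC_LIB_* (the "configuration flag rename" fix in the shortlog); these options build C fallbacks in lib/ for 64-bit arithmetic calls the compiler may emit on 32-bit targets. A simplified sketch of the helper behind GENERIC_LIB_ASHLDI3 (the real lib/ashldi3.c uses DWunion and word_type; this standalone version is illustrative only):

	/* 64-bit left shift composed from 32-bit operations, for CPUs
	 * without native 64-bit shifts (simplified sketch of __ashldi3). */
	long long __ashldi3(long long u, int b)
	{
		unsigned int lo = (unsigned int)u;
		unsigned int hi = (unsigned int)((unsigned long long)u >> 32);

		if (b == 0)
			return u;
		if (b >= 32) {		/* low word moves entirely into hi */
			hi = lo << (b - 32);
			lo = 0;
		} else {		/* carry lo's high bits into hi */
			hi = (hi << b) | (lo >> (32 - b));
			lo <<= b;
		}
		return (long long)(((unsigned long long)hi << 32) | lo);
	}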
--- a/arch/nds32/Makefile
+++ b/arch/nds32/Makefile
@@ -34,10 +34,12 @@ ifdef CONFIG_CPU_LITTLE_ENDIAN
 KBUILD_CFLAGS	+= $(call cc-option, -EL)
 KBUILD_AFLAGS	+= $(call cc-option, -EL)
 LDFLAGS		+= $(call cc-option, -EL)
+CHECKFLAGS	+= -D__NDS32_EL__
 else
 KBUILD_CFLAGS	+= $(call cc-option, -EB)
 KBUILD_AFLAGS	+= $(call cc-option, -EB)
 LDFLAGS		+= $(call cc-option, -EB)
+CHECKFLAGS	+= -D__NDS32_EB__
 endif
 
 boot := arch/nds32/boot
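
The CHECKFLAGS additions exist because sparse preprocesses the code itself and never sees the compiler's built-in __NDS32_EB__/__NDS32_EL__ defines, so endianness-dependent headers would misparse. The pattern being kept working looks roughly like the usual asm/byteorder.h selection (a sketch, mirroring the common arch pattern):

	/* Sketch of the usual byteorder-header pattern that the
	 * -D__NDS32_E[BL]__ defines keep working under sparse. */
	#ifdef __NDS32_EB__
	#include <linux/byteorder/big_endian.h>
	#elif defined(__NDS32_EL__)
	#include <linux/byteorder/little_endian.h>
	#else
	#error "endianness macro not defined"
	#endif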
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -8,6 +8,8 @@
 
 #define PG_dcache_dirty PG_arch_1
 
+void flush_icache_range(unsigned long start, unsigned long end);
+void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -34,13 +36,16 @@ void flush_anon_page(struct vm_area_struct *vma,
 void flush_kernel_dcache_page(struct page *page);
 void flush_kernel_vmap_range(void *addr, int size);
 void invalidate_kernel_vmap_range(void *addr, int size);
-void flush_icache_range(unsigned long start, unsigned long end);
-void flush_icache_page(struct vm_area_struct *vma, struct page *page);
 #define flush_dcache_mmap_lock(mapping)   xa_lock_irq(&(mapping)->i_pages)
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
 #include <asm-generic/cacheflush.h>
+#undef flush_icache_range
+#undef flush_icache_page
+#undef flush_icache_user_range
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len);
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
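
The #undef trio in the non-aliasing branch is needed because asm-generic/cacheflush.h of this era provides its fallbacks as no-op macros, roughly as paraphrased below; without the #undefs, the real nds32 prototypes and out-of-line definitions would collide with them:

	/* Paraphrase of the 4.18-era asm-generic fallbacks being overridden: */
	#define flush_icache_range(start, end)			do { } while (0)
	#define flush_icache_page(vma, pg)			do { } while (0)
	#define flush_icache_user_range(vma, pg, adr, len)	do { } while (0)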
--- a/arch/nds32/include/asm/futex.h
+++ b/arch/nds32/include/asm/futex.h
@@ -16,7 +16,7 @@
 	"	.popsection\n"					\
 	"	.pushsection .fixup,\"ax\"\n"			\
 	"4:	move	%0, " err_reg "\n"			\
-	"	j	3b\n"					\
+	"	b	3b\n"					\
 	"	.popsection"
 
 #define __futex_atomic_op(insn, ret, oldval, tmp, uaddr, oparg)	\
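
This one-character change is the "relocation truncated to fit: R_NDS32_25_PCREL_RELA" fix from the shortlog: the nds32 j instruction encodes a 25-bit PC-relative offset, and with allyesconfig the .fixup section can end up beyond that reach from label 3. The b pseudo-instruction lets the assembler and linker expand the branch into a longer-range sequence when needed.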
--- a/arch/nds32/kernel/setup.c
+++ b/arch/nds32/kernel/setup.c
@@ -278,7 +278,8 @@ static void __init setup_memory(void)
 
 void __init setup_arch(char **cmdline_p)
 {
-	early_init_devtree( __dtb_start);
+	early_init_devtree(__atags_pointer ? \
+			phys_to_virt(__atags_pointer) : __dtb_start);
 
 	setup_cpuinfo();
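
This makes setup_arch() honor a device tree handed over by the bootloader: __atags_pointer carries the physical address of that DTB (recorded by early boot code), so it must be translated with phys_to_virt() before use, and the built-in __dtb_start blob becomes only a fallback for when no pointer was passed.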
--- a/arch/nds32/mm/cacheflush.c
+++ b/arch/nds32/mm/cacheflush.c
@@ -13,7 +13,39 @@
 
 extern struct cache_info L1_cache_info[2];
 
-#ifndef CONFIG_CPU_CACHE_ALIASING
+void flush_icache_range(unsigned long start, unsigned long end)
+{
+	unsigned long line_size, flags;
+	line_size = L1_cache_info[DCACHE].line_size;
+	start = start & ~(line_size - 1);
+	end = (end + line_size - 1) & ~(line_size - 1);
+	local_irq_save(flags);
+	cpu_cache_wbinval_range(start, end, 1);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_range);
+
+void flush_icache_page(struct vm_area_struct *vma, struct page *page)
+{
+	unsigned long flags;
+	unsigned long kaddr;
+	local_irq_save(flags);
+	kaddr = (unsigned long)kmap_atomic(page);
+	cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+	kunmap_atomic((void *)kaddr);
+	local_irq_restore(flags);
+}
+EXPORT_SYMBOL(flush_icache_page);
+
+void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
+			     unsigned long addr, int len)
+{
+	unsigned long kaddr;
+	kaddr = (unsigned long)kmap_atomic(page) + (addr & ~PAGE_MASK);
+	flush_icache_range(kaddr, kaddr + len);
+	kunmap_atomic((void *)kaddr);
+}
+
 void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 		      pte_t * pte)
 {
@@ -35,19 +67,15 @@ void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
 
 	if ((test_and_clear_bit(PG_dcache_dirty, &page->flags)) ||
 	    (vma->vm_flags & VM_EXEC)) {
-
-		if (!PageHighMem(page)) {
-			cpu_cache_wbinval_page((unsigned long)
-					       page_address(page),
-					       vma->vm_flags & VM_EXEC);
-		} else {
-			unsigned long kaddr = (unsigned long)kmap_atomic(page);
-			cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
-			kunmap_atomic((void *)kaddr);
-		}
+		unsigned long kaddr;
+		local_irq_save(flags);
+		kaddr = (unsigned long)kmap_atomic(page);
+		cpu_cache_wbinval_page(kaddr, vma->vm_flags & VM_EXEC);
+		kunmap_atomic((void *)kaddr);
+		local_irq_restore(flags);
 	}
 }
 
-#else
+#ifdef CONFIG_CPU_CACHE_ALIASING
 extern pte_t va_present(struct mm_struct *mm, unsigned long addr);
 
 static inline unsigned long aliasing(unsigned long addr, unsigned long page)
@@ -317,52 +345,4 @@ void invalidate_kernel_vmap_range(void *addr, int size)
 	local_irq_restore(flags);
 }
 EXPORT_SYMBOL(invalidate_kernel_vmap_range);
-
-void flush_icache_range(unsigned long start, unsigned long end)
-{
-	unsigned long line_size, flags;
-	line_size = L1_cache_info[DCACHE].line_size;
-	start = start & ~(line_size - 1);
-	end = (end + line_size - 1) & ~(line_size - 1);
-	local_irq_save(flags);
-	cpu_cache_wbinval_range(start, end, 1);
-	local_irq_restore(flags);
-}
-EXPORT_SYMBOL(flush_icache_range);
-
-void flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
-	unsigned long flags;
-	local_irq_save(flags);
-	cpu_cache_wbinval_page((unsigned long)page_address(page),
-			       vma->vm_flags & VM_EXEC);
-	local_irq_restore(flags);
-}
-
-void update_mmu_cache(struct vm_area_struct *vma, unsigned long addr,
-		      pte_t * pte)
-{
-	struct page *page;
-	unsigned long flags;
-	unsigned long pfn = pte_pfn(*pte);
-
-	if (!pfn_valid(pfn))
-		return;
-
-	if (vma->vm_mm == current->active_mm) {
-		local_irq_save(flags);
-		__nds32__mtsr_dsb(addr, NDS32_SR_TLB_VPN);
-		__nds32__tlbop_rwr(*pte);
-		__nds32__isb();
-		local_irq_restore(flags);
-	}
-
-	page = pfn_to_page(pfn);
-	if (test_and_clear_bit(PG_dcache_dirty, &page->flags) ||
-	    (vma->vm_flags & VM_EXEC)) {
-		local_irq_save(flags);
-		cpu_dcache_wbinval_page((unsigned long)page_address(page));
-		local_irq_restore(flags);
-	}
-}
 #endif
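
Even the non-aliasing configuration needs a real flush_icache_user_range() because generic helpers invoke it after writing instructions into another process's pages, for example when ptrace plants a breakpoint. The generic copy_to_user_page() is, paraphrased from the 4.18-era asm-generic/cacheflush.h:

	/* Paraphrased generic helper: after copying instructions into a
	 * user page, the icache for that user range must be invalidated,
	 * which is why nds32 overrides the no-op macro above. */
	#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
		do {							\
			memcpy(dst, src, len);				\
			flush_icache_user_range(vma, page, vaddr, len);	\
		} while (0)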