Commit 67373994 authored by Linus Torvalds

Merge tag 'asm-generic-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/playground

Pull asm-generic fixes from Arnd Bergmann:
 "Here are two bugfixes from Mike Rapoport, both fixing compile-time
  errors for the nds32 architecture that were recently introduced"

* tag 'asm-generic-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/arnd/playground:
  nds32: fix build failure caused by page table folding updates
  asm-generic/nds32: don't redefine cacheflush primitives
parents c21ed4d9 060dc911
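
The cacheflush fix below relies on a common asm-generic convention: the architecture declares its own implementation and defines a macro of the same name, so the generic header's #ifndef guard skips the empty fallback stub. A minimal sketch of the arch side of that pattern (the layout is illustrative; the mechanism is the one the diff introduces):

/* arch header: declare the real implementation first ... */
void flush_icache_range(unsigned long start, unsigned long end);
/* ... and mark the symbol as overridden for the generic header */
#define flush_icache_range flush_icache_range

/* now only the still-undefined primitives get no-op stubs */
#include <asm-generic/cacheflush.h>
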
--- a/arch/nds32/include/asm/cacheflush.h
+++ b/arch/nds32/include/asm/cacheflush.h
@@ -9,7 +9,11 @@
 #define PG_dcache_dirty PG_arch_1
 
 void flush_icache_range(unsigned long start, unsigned long end);
+#define flush_icache_range flush_icache_range
+
 void flush_icache_page(struct vm_area_struct *vma, struct page *page);
+#define flush_icache_page flush_icache_page
+
 #ifdef CONFIG_CPU_CACHE_ALIASING
 void flush_cache_mm(struct mm_struct *mm);
 void flush_cache_dup_mm(struct mm_struct *mm);
@@ -40,12 +44,11 @@ void invalidate_kernel_vmap_range(void *addr, int size);
 #define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&(mapping)->i_pages)
 
 #else
-#include <asm-generic/cacheflush.h>
-#undef flush_icache_range
-#undef flush_icache_page
-#undef flush_icache_user_range
 void flush_icache_user_range(struct vm_area_struct *vma, struct page *page,
 			     unsigned long addr, int len);
+#define flush_icache_user_range flush_icache_user_range
+
+#include <asm-generic/cacheflush.h>
 #endif
 
 #endif /* __NDS32_CACHEFLUSH_H__ */
--- a/arch/nds32/include/asm/pgtable.h
+++ b/arch/nds32/include/asm/pgtable.h
@@ -195,7 +195,7 @@ extern void paging_init(void);
 #define pte_unmap(pte)		do { } while (0)
 #define pte_unmap_nested(pte)	do { } while (0)
 
-#define pmd_off_k(address)	pmd_offset(pgd_offset_k(address), address)
+#define pmd_off_k(address)	pmd_offset(pud_offset(p4d_offset(pgd_offset_k(address), (address)), (address)), (address))
 
 #define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
 /*
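
For reference, pmd_off_k() now spells out the walk through every page-table level; on two-level architectures such as nds32 the p4d and pud levels are folded, so the intermediate offsets compile down to pass-throughs. A minimal open-coded sketch of the same walk (the helper name is invented for illustration):

static inline pmd_t *pmd_off_k_sketch(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);	/* top-level entry for a kernel address */
	p4d_t *p4d = p4d_offset(pgd, addr);	/* pass-through when the p4d level is folded */
	pud_t *pud = pud_offset(p4d, addr);	/* pass-through when the pud level is folded */

	return pmd_offset(pud, addr);		/* the entry the macro ultimately yields */
}
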
--- a/include/asm-generic/cacheflush.h
+++ b/include/asm-generic/cacheflush.h
@@ -11,71 +11,102 @@
  * The cache doesn't need to be flushed when TLB entries change when
  * the cache is mapped to physical memory, not virtual memory
  */
+#ifndef flush_cache_all
 static inline void flush_cache_all(void)
 {
 }
+#endif
 
+#ifndef flush_cache_mm
 static inline void flush_cache_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_dup_mm
 static inline void flush_cache_dup_mm(struct mm_struct *mm)
 {
 }
+#endif
 
+#ifndef flush_cache_range
 static inline void flush_cache_range(struct vm_area_struct *vma,
				     unsigned long start,
				     unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_page
 static inline void flush_cache_page(struct vm_area_struct *vma,
				    unsigned long vmaddr,
				    unsigned long pfn)
 {
 }
+#endif
 
+#ifndef flush_dcache_page
 static inline void flush_dcache_page(struct page *page)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_lock
 static inline void flush_dcache_mmap_lock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_dcache_mmap_unlock
 static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
 {
 }
+#endif
 
+#ifndef flush_icache_range
 static inline void flush_icache_range(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_icache_page
 static inline void flush_icache_page(struct vm_area_struct *vma,
				     struct page *page)
 {
 }
+#endif
 
+#ifndef flush_icache_user_range
 static inline void flush_icache_user_range(struct vm_area_struct *vma,
					   struct page *page,
					   unsigned long addr, int len)
 {
 }
+#endif
 
+#ifndef flush_cache_vmap
 static inline void flush_cache_vmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
+#ifndef flush_cache_vunmap
 static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
 {
 }
+#endif
 
-#define copy_to_user_page(vma, page, vaddr, dst, src, len) \
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
	do { \
		memcpy(dst, src, len); \
		flush_icache_user_range(vma, page, vaddr, len); \
	} while (0)
+#endif
 
+#ifndef copy_from_user_page
 #define copy_from_user_page(vma, page, vaddr, dst, src, len) \
	memcpy(dst, src, len)
+#endif
 
 #endif /* __ASM_CACHEFLUSH_H */
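
For comparison, the failure mode the first patch removes, sketched in simplified form: once the generic fallbacks became static inline functions rather than macros, the old include-then-#undef approach in the nds32 header stopped working, because #undef cannot remove a function definition and the later prototype then clashes with the stub:

#include <asm-generic/cacheflush.h>	/* now defines static inline flush_icache_range() */
#undef flush_icache_range		/* removes nothing: the stub is a function, not a macro */

void flush_icache_range(unsigned long start, unsigned long end);
/* error: non-static declaration follows the static inline definition */

Declaring the overrides and their marker macros before the include, as the patch does, lets the #ifndef guards skip the conflicting stubs entirely.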