Commit b015dcd6 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'for-5.18/parisc-4' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux

Pull parisc architecture fixes from Helge Deller:
 "We had two big outstanding issues after v5.18-rc6:

   a) 32-bit kernels on 64-bit machines (e.g. on a C3700 which is able
      to run 32- and 64-bit kernels) failed early in userspace.

   b) 64-bit kernels on PA8800/PA8900 CPUs (e.g. in a C8000) showed
      random userspace segfaults. We assumed that those problems were
      caused by the tmpalias flushes.

  Dave did a lot of testing and reorganization of the current flush code
  and fixed the 32-bit cache flushing. For PA8800/PA8900 CPUs he
  switched the code to flush using the virtual address of user and
  kernel pages instead of using tmpalias flushes. The tmpalias flushes
  don't seem to work reliably on such CPUs.

  We tested the patches on a wide range of machines (715/64, B160L, C3000,
  C3700, C8000, rp3440) and they have been in for-next without any
  conflicts.

  Summary:

   - Rewrite the cache flush code for PA8800/PA8900 CPUs to flush using
     the virtual address of user and kernel pages instead of using
     tmpalias flushes. Testing showed that tmpalias flushes don't work
     reliably on PA8800/PA8900 CPUs

   - Fix flush code to allow 32-bit kernels to run on 64-bit capable
     machines, e.g. a 32-bit kernel on C3700 machines"

* tag 'for-5.18/parisc-4' of git://git.kernel.org/pub/scm/linux/kernel/git/deller/parisc-linux:
  parisc: Fix patch code locking and flushing
  parisc: Rewrite cache flush code for PA8800/PA8900
  parisc: Disable debug code regarding cache flushes in handle_nadtlb_fault()
parents 99b05644 798082be
...@@ -59,20 +59,12 @@ void flush_dcache_page(struct page *page); ...@@ -59,20 +59,12 @@ void flush_dcache_page(struct page *page);
flush_kernel_icache_range_asm(s,e); \ flush_kernel_icache_range_asm(s,e); \
} while (0) } while (0)
#define copy_to_user_page(vma, page, vaddr, dst, src, len) \ void copy_to_user_page(struct vm_area_struct *vma, struct page *page,
do { \ unsigned long user_vaddr, void *dst, void *src, int len);
flush_cache_page(vma, vaddr, page_to_pfn(page)); \ void copy_from_user_page(struct vm_area_struct *vma, struct page *page,
memcpy(dst, src, len); \ unsigned long user_vaddr, void *dst, void *src, int len);
flush_kernel_dcache_range_asm((unsigned long)dst, (unsigned long)dst + len); \ void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr,
} while (0) unsigned long pfn);
#define copy_from_user_page(vma, page, vaddr, dst, src, len) \
do { \
flush_cache_page(vma, vaddr, page_to_pfn(page)); \
memcpy(dst, src, len); \
} while (0)
void flush_cache_page(struct vm_area_struct *vma, unsigned long vmaddr, unsigned long pfn);
void flush_cache_range(struct vm_area_struct *vma, void flush_cache_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end); unsigned long start, unsigned long end);
...@@ -80,16 +72,7 @@ void flush_cache_range(struct vm_area_struct *vma, ...@@ -80,16 +72,7 @@ void flush_cache_range(struct vm_area_struct *vma,
void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr); void flush_dcache_page_asm(unsigned long phys_addr, unsigned long vaddr);
#define ARCH_HAS_FLUSH_ANON_PAGE #define ARCH_HAS_FLUSH_ANON_PAGE
static inline void void flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr);
flush_anon_page(struct vm_area_struct *vma, struct page *page, unsigned long vmaddr)
{
if (PageAnon(page)) {
flush_tlb_page(vma, vmaddr);
preempt_disable();
flush_dcache_page_asm(page_to_phys(page), vmaddr);
preempt_enable();
}
}
#define ARCH_HAS_FLUSH_ON_KUNMAP #define ARCH_HAS_FLUSH_ON_KUNMAP
static inline void kunmap_flush_on_unmap(void *addr) static inline void kunmap_flush_on_unmap(void *addr)
......
...@@ -26,12 +26,14 @@ ...@@ -26,12 +26,14 @@
#define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from)) #define copy_page(to, from) copy_page_asm((void *)(to), (void *)(from))
struct page; struct page;
struct vm_area_struct;
void clear_page_asm(void *page); void clear_page_asm(void *page);
void copy_page_asm(void *to, void *from); void copy_page_asm(void *to, void *from);
#define clear_user_page(vto, vaddr, page) clear_page_asm(vto) #define clear_user_page(vto, vaddr, page) clear_page_asm(vto)
void copy_user_page(void *vto, void *vfrom, unsigned long vaddr, void copy_user_highpage(struct page *to, struct page *from, unsigned long vaddr,
struct page *pg); struct vm_area_struct *vma);
#define __HAVE_ARCH_COPY_USER_HIGHPAGE
/* /*
* These are used to make use of C type-checking.. * These are used to make use of C type-checking..
......
This diff is collapsed.
...@@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags, ...@@ -40,10 +40,7 @@ static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags,
*need_unmap = 1; *need_unmap = 1;
set_fixmap(fixmap, page_to_phys(page)); set_fixmap(fixmap, page_to_phys(page));
if (flags) raw_spin_lock_irqsave(&patch_lock, *flags);
raw_spin_lock_irqsave(&patch_lock, *flags);
else
__acquire(&patch_lock);
return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK)); return (void *) (__fix_to_virt(fixmap) + (uintaddr & ~PAGE_MASK));
} }
...@@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags) ...@@ -52,10 +49,7 @@ static void __kprobes patch_unmap(int fixmap, unsigned long *flags)
{ {
clear_fixmap(fixmap); clear_fixmap(fixmap);
if (flags) raw_spin_unlock_irqrestore(&patch_lock, *flags);
raw_spin_unlock_irqrestore(&patch_lock, *flags);
else
__release(&patch_lock);
} }
void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
...@@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) ...@@ -67,8 +61,9 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
int mapped; int mapped;
/* Make sure we don't have any aliases in cache */ /* Make sure we don't have any aliases in cache */
flush_kernel_vmap_range(addr, len); flush_kernel_dcache_range_asm(start, end);
flush_icache_range(start, end); flush_kernel_icache_range_asm(start, end);
flush_tlb_kernel_range(start, end);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped); p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, &mapped);
...@@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) ...@@ -81,8 +76,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
* We're crossing a page boundary, so * We're crossing a page boundary, so
* need to remap * need to remap
*/ */
flush_kernel_vmap_range((void *)fixmap, flush_kernel_dcache_range_asm((unsigned long)fixmap,
(p-fixmap) * sizeof(*p)); (unsigned long)p);
flush_tlb_kernel_range((unsigned long)fixmap,
(unsigned long)p);
if (mapped) if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags); patch_unmap(FIX_TEXT_POKE0, &flags);
p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags, p = fixmap = patch_map(addr, FIX_TEXT_POKE0, &flags,
...@@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len) ...@@ -90,10 +87,10 @@ void __kprobes __patch_text_multiple(void *addr, u32 *insn, unsigned int len)
} }
} }
flush_kernel_vmap_range((void *)fixmap, (p-fixmap) * sizeof(*p)); flush_kernel_dcache_range_asm((unsigned long)fixmap, (unsigned long)p);
flush_tlb_kernel_range((unsigned long)fixmap, (unsigned long)p);
if (mapped) if (mapped)
patch_unmap(FIX_TEXT_POKE0, &flags); patch_unmap(FIX_TEXT_POKE0, &flags);
flush_icache_range(start, end);
} }
void __kprobes __patch_text(void *addr, u32 insn) void __kprobes __patch_text(void *addr, u32 insn)
......
...@@ -22,6 +22,8 @@ ...@@ -22,6 +22,8 @@
#include <asm/traps.h> #include <asm/traps.h>
#define DEBUG_NATLB 0
/* Various important other fields */ /* Various important other fields */
#define bit22set(x) (x & 0x00000200) #define bit22set(x) (x & 0x00000200)
#define bits23_25set(x) (x & 0x000001c0) #define bits23_25set(x) (x & 0x000001c0)
...@@ -450,8 +452,8 @@ handle_nadtlb_fault(struct pt_regs *regs) ...@@ -450,8 +452,8 @@ handle_nadtlb_fault(struct pt_regs *regs)
fallthrough; fallthrough;
case 0x380: case 0x380:
/* PDC and FIC instructions */ /* PDC and FIC instructions */
if (printk_ratelimit()) { if (DEBUG_NATLB && printk_ratelimit()) {
pr_warn("BUG: nullifying cache flush/purge instruction\n"); pr_warn("WARNING: nullifying cache flush/purge instruction\n");
show_regs(regs); show_regs(regs);
} }
if (insn & 0x20) { if (insn & 0x20) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment