Commit 8e9a8427 authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'riscv-for-linus-6.2-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux

Pull RISC-V fixes from Palmer Dabbelt:
 "This is a little bigger than I'd hoped for this late in the cycle, but
  they're all pretty concrete fixes and the only one that's bigger than
  a few lines is pmdp_collapse_flush() (which is almost all
  boilerplate/comment). It's also all bug fixes for issues that have
  been around for a while.

  So I think it's not all that scary, just bad timing.

   - avoid partial TLB fences for huge pages, which are disallowed by
     the ISA

   - avoid missing a frame when dumping stacks

   - avoid misaligned accesses (and possibly overflows) in kprobes

   - fix a race condition in tracking page dirtiness"

* tag 'riscv-for-linus-6.2-rc8' of git://git.kernel.org/pub/scm/linux/kernel/git/riscv/linux:
  riscv: Fixup race condition on PG_dcache_clean in flush_icache_pte
  riscv: kprobe: Fixup misaligned load text
  riscv: stacktrace: Fix missing the first frame
  riscv: mm: Implement pmdp_collapse_flush for THP
parents 3647d2d7 950b879b
...@@ -721,6 +721,10 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma, ...@@ -721,6 +721,10 @@ static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd); page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd);
return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd))); return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
} }
#define pmdp_collapse_flush pmdp_collapse_flush
extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp);
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ #endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/* /*
......
...@@ -65,16 +65,18 @@ static bool __kprobes arch_check_kprobe(struct kprobe *p) ...@@ -65,16 +65,18 @@ static bool __kprobes arch_check_kprobe(struct kprobe *p)
int __kprobes arch_prepare_kprobe(struct kprobe *p) int __kprobes arch_prepare_kprobe(struct kprobe *p)
{ {
unsigned long probe_addr = (unsigned long)p->addr; u16 *insn = (u16 *)p->addr;
if (probe_addr & 0x1) if ((unsigned long)insn & 0x1)
return -EILSEQ; return -EILSEQ;
if (!arch_check_kprobe(p)) if (!arch_check_kprobe(p))
return -EILSEQ; return -EILSEQ;
/* copy instruction */ /* copy instruction */
p->opcode = *p->addr; p->opcode = (kprobe_opcode_t)(*insn++);
if (GET_INSN_LENGTH(p->opcode) == 4)
p->opcode |= (kprobe_opcode_t)(*insn) << 16;
/* decode instruction */ /* decode instruction */
switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) { switch (riscv_probe_decode_insn(p->addr, &p->ainsn.api)) {
......
...@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, ...@@ -32,6 +32,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
fp = (unsigned long)__builtin_frame_address(0); fp = (unsigned long)__builtin_frame_address(0);
sp = current_stack_pointer; sp = current_stack_pointer;
pc = (unsigned long)walk_stackframe; pc = (unsigned long)walk_stackframe;
level = -1;
} else { } else {
/* task blocked in __switch_to */ /* task blocked in __switch_to */
fp = task->thread.s[0]; fp = task->thread.s[0];
...@@ -43,7 +44,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, ...@@ -43,7 +44,7 @@ void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
unsigned long low, high; unsigned long low, high;
struct stackframe *frame; struct stackframe *frame;
if (unlikely(!__kernel_text_address(pc) || (level++ >= 1 && !fn(arg, pc)))) if (unlikely(!__kernel_text_address(pc) || (level++ >= 0 && !fn(arg, pc))))
break; break;
/* Validate frame pointer */ /* Validate frame pointer */
......
...@@ -90,8 +90,10 @@ void flush_icache_pte(pte_t pte) ...@@ -90,8 +90,10 @@ void flush_icache_pte(pte_t pte)
if (PageHuge(page)) if (PageHuge(page))
page = compound_head(page); page = compound_head(page);
if (!test_and_set_bit(PG_dcache_clean, &page->flags)) if (!test_bit(PG_dcache_clean, &page->flags)) {
flush_icache_all(); flush_icache_all();
set_bit(PG_dcache_clean, &page->flags);
}
} }
#endif /* CONFIG_MMU */ #endif /* CONFIG_MMU */
......
...@@ -81,3 +81,23 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr) ...@@ -81,3 +81,23 @@ int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
} }
#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */ #endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
/*
 * pmdp_collapse_flush - atomically clear a PMD entry and flush the TLB
 * for it, as part of collapsing small pages into a transparent huge page.
 *
 * @vma:     the VMA the huge page belongs to (its mm is flushed)
 * @address: virtual address covered by the PMD; must be HPAGE_PMD aligned
 * @pmdp:    pointer to the PMD entry to clear
 *
 * Returns the old PMD value read-and-cleared from @pmdp.
 *
 * This overrides the generic pmdp_collapse_flush() (selected via the
 * #define in the RISC-V pgtable.h) because the generic version uses an
 * address-ranged flush, which the RISC-V ISA does not permit for the
 * non-leaf -> leaf transition happening here; see the comment below.
 */
pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
unsigned long address, pmd_t *pmdp)
{
pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
/*
 * NOTE(review): these sanity checks deliberately run *after* the
 * clear above, so *pmdp is the post-clear entry here — this mirrors
 * the other architectures' implementations of this helper.
 */
VM_BUG_ON(address & ~HPAGE_PMD_MASK);
VM_BUG_ON(pmd_trans_huge(*pmdp));
/*
 * When leaf PTE entries (regular pages) are collapsed into a leaf
 * PMD entry (huge page), a valid non-leaf PTE is converted into a
 * valid leaf PTE at the level 1 page table. Since the sfence.vma
 * forms that specify an address only apply to leaf PTEs, we need a
 * global flush here. collapse_huge_page() assumes these flushes are
 * eager, so just do the fence here.
 */
flush_tlb_mm(vma->vm_mm);
return pmd;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment