Commit c6fa8e6d authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux

Pull arm64 fixes from Will Deacon:
 "This addresses a couple of issues found with RT, a broken initrd
  message in the console log and a simple performance fix for some MMC
  workloads.

  Summary:

   - A couple of locking fixes for RT kernels
   - Avoid printing bogus initrd warnings when initrd isn't present
   - Performance fix for random mmap file readahead
   - Typo fix"

* tag 'arm64-fixes' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux:
  arm64: replace read_lock to rcu lock in call_break_hook
  arm64: Don't relocate non-existent initrd
  arm64: convert patch_lock to raw lock
  arm64: readahead: fault retry breaks mmap file read random detection
  arm64: debug: Fix typo in debug-monitors.c
parents e82fa92e 62c6c61a
...@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook) ...@@ -201,7 +201,7 @@ void unregister_step_hook(struct step_hook *hook)
} }
/* /*
* Call registered single step handers * Call registered single step handlers
* There is no Syndrome info to check for determining the handler. * There is no Syndrome info to check for determining the handler.
* So we call all the registered handlers, until the right handler is * So we call all the registered handlers, until the right handler is
* found which returns zero. * found which returns zero.
...@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr, ...@@ -271,20 +271,21 @@ static int single_step_handler(unsigned long addr, unsigned int esr,
* Use reader/writer locks instead of plain spinlock. * Use reader/writer locks instead of plain spinlock.
*/ */
static LIST_HEAD(break_hook); static LIST_HEAD(break_hook);
static DEFINE_RWLOCK(break_hook_lock); static DEFINE_SPINLOCK(break_hook_lock);
void register_break_hook(struct break_hook *hook) void register_break_hook(struct break_hook *hook)
{ {
write_lock(&break_hook_lock); spin_lock(&break_hook_lock);
list_add(&hook->node, &break_hook); list_add_rcu(&hook->node, &break_hook);
write_unlock(&break_hook_lock); spin_unlock(&break_hook_lock);
} }
void unregister_break_hook(struct break_hook *hook) void unregister_break_hook(struct break_hook *hook)
{ {
write_lock(&break_hook_lock); spin_lock(&break_hook_lock);
list_del(&hook->node); list_del_rcu(&hook->node);
write_unlock(&break_hook_lock); spin_unlock(&break_hook_lock);
synchronize_rcu();
} }
static int call_break_hook(struct pt_regs *regs, unsigned int esr) static int call_break_hook(struct pt_regs *regs, unsigned int esr)
...@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr) ...@@ -292,11 +293,11 @@ static int call_break_hook(struct pt_regs *regs, unsigned int esr)
struct break_hook *hook; struct break_hook *hook;
int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL; int (*fn)(struct pt_regs *regs, unsigned int esr) = NULL;
read_lock(&break_hook_lock); rcu_read_lock();
list_for_each_entry(hook, &break_hook, node) list_for_each_entry_rcu(hook, &break_hook, node)
if ((esr & hook->esr_mask) == hook->esr_val) if ((esr & hook->esr_mask) == hook->esr_val)
fn = hook->fn; fn = hook->fn;
read_unlock(&break_hook_lock); rcu_read_unlock();
return fn ? fn(regs, esr) : DBG_HOOK_ERROR; return fn ? fn(regs, esr) : DBG_HOOK_ERROR;
} }
......
...@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn) ...@@ -85,7 +85,7 @@ bool aarch64_insn_is_branch_imm(u32 insn)
aarch64_insn_is_bcond(insn)); aarch64_insn_is_bcond(insn));
} }
static DEFINE_SPINLOCK(patch_lock); static DEFINE_RAW_SPINLOCK(patch_lock);
static void __kprobes *patch_map(void *addr, int fixmap) static void __kprobes *patch_map(void *addr, int fixmap)
{ {
...@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn) ...@@ -131,13 +131,13 @@ static int __kprobes __aarch64_insn_write(void *addr, u32 insn)
unsigned long flags = 0; unsigned long flags = 0;
int ret; int ret;
spin_lock_irqsave(&patch_lock, flags); raw_spin_lock_irqsave(&patch_lock, flags);
waddr = patch_map(addr, FIX_TEXT_POKE0); waddr = patch_map(addr, FIX_TEXT_POKE0);
ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE); ret = probe_kernel_write(waddr, &insn, AARCH64_INSN_SIZE);
patch_unmap(FIX_TEXT_POKE0); patch_unmap(FIX_TEXT_POKE0);
spin_unlock_irqrestore(&patch_lock, flags); raw_spin_unlock_irqrestore(&patch_lock, flags);
return ret; return ret;
} }
......
...@@ -364,6 +364,8 @@ static void __init relocate_initrd(void) ...@@ -364,6 +364,8 @@ static void __init relocate_initrd(void)
to_free = ram_end - orig_start; to_free = ram_end - orig_start;
size = orig_end - orig_start; size = orig_end - orig_start;
if (!size)
return;
/* initrd needs to be relocated completely inside linear mapping */ /* initrd needs to be relocated completely inside linear mapping */
new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn), new_start = memblock_find_in_range(0, PFN_PHYS(max_pfn),
......
...@@ -287,6 +287,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, ...@@ -287,6 +287,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
* starvation. * starvation.
*/ */
mm_flags &= ~FAULT_FLAG_ALLOW_RETRY; mm_flags &= ~FAULT_FLAG_ALLOW_RETRY;
mm_flags |= FAULT_FLAG_TRIED;
goto retry; goto retry;
} }
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment