Commit 63c40436 authored by Heiko Carstens, committed by Linus Torvalds

s390/kprobes: add support for pc-relative long displacement instructions

With the general-instruction extension facility (z10) a couple of
instructions with a pc-relative long displacement were introduced.  The
kprobes support for these instructions, however, was never implemented.

As a result, a probe placed on any of these instructions would have led to
random behaviour after the instruction was executed within the insn slot.

So let's add the missing handling for these instructions.  Since all of the
new instructions have a 32 bit signed displacement, the easiest solution is
to allocate an insn slot within the same 2GB area as the original
instruction and patch the displacement field.
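
For illustration only (not part of this patch), a minimal sketch of the
displacement arithmetic; the probe address, insn slot address and original
displacement below are made up:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t addr = 0x100000;	/* hypothetical probed larl */
		uint64_t new_addr = 0x200000;	/* hypothetical insn slot */
		int32_t disp = 0x500;		/* original RI2, in halfwords */

		/* absolute target address of the original instruction */
		int64_t target = addr + 2 * (int64_t)disp;
		/* displacement as seen from the insn slot, again in halfwords */
		int64_t new_disp = (target - (int64_t)new_addr) / 2;

		/* prints -523008 (-0x7fb00), still fits the 32 bit RI2 field */
		printf("new RI2 = %lld\n", (long long)new_disp);
		return 0;
	}
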
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Reviewed-by: Masami Hiramatsu <masami.hiramatsu.pt@hitachi.com>
Cc: Ananth N Mavinakayanahalli <ananth@in.ibm.com>
Cc: Ingo Molnar <mingo@kernel.org>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent af96397d
@@ -31,6 +31,8 @@
 #include <linux/ptrace.h>
 #include <linux/percpu.h>
 
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+
 struct pt_regs;
 struct kprobe;
@@ -57,7 +59,7 @@ typedef u16 kprobe_opcode_t;
 /* Architecture specific copy of original instruction */
 struct arch_specific_insn {
 	/* copy of original instruction */
-	kprobe_opcode_t insn[MAX_INSN_SIZE];
+	kprobe_opcode_t *insn;
 };
 
 struct prev_kprobe {
...
@@ -37,6 +37,26 @@ DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
 
 struct kretprobe_blackpoint kretprobe_blacklist[] = { };
 
+DEFINE_INSN_CACHE_OPS(dmainsn);
+
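+/*
+ * The dmainsn pages are allocated with GFP_DMA: on s390 ZONE_DMA is the
+ * memory below 2GB, so insn slots taken from these pages are within the
+ * same 2GB area as the kernel image and the patched 32 bit displacement
+ * of a copied pc-relative instruction always fits.
+ */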
+static void *alloc_dmainsn_page(void)
+{
+	return (void *)__get_free_page(GFP_KERNEL | GFP_DMA);
+}
+
+static void free_dmainsn_page(void *page)
+{
+	free_page((unsigned long)page);
+}
+
+struct kprobe_insn_cache kprobe_dmainsn_slots = {
+	.mutex = __MUTEX_INITIALIZER(kprobe_dmainsn_slots.mutex),
+	.alloc = alloc_dmainsn_page,
+	.free = free_dmainsn_page,
+	.pages = LIST_HEAD_INIT(kprobe_dmainsn_slots.pages),
+	.insn_size = MAX_INSN_SIZE,
+};
+
 static int __kprobes is_prohibited_opcode(kprobe_opcode_t *insn)
 {
 	switch (insn[0] >> 8) {
@@ -100,9 +120,8 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
 			fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xc0:
-		if ((insn[0] & 0x0f) == 0x00 || /* larl */
-		    (insn[0] & 0x0f) == 0x05) /* brasl */
+		if ((insn[0] & 0x0f) == 0x05) /* brasl */
 			fixup |= FIXUP_RETURN_REGISTER;
 		break;
 	case 0xeb:
 		switch (insn[2] & 0xff) {
@@ -134,18 +153,128 @@ static int __kprobes get_fixup_type(kprobe_opcode_t *insn)
 	return fixup;
 }
 
+static int __kprobes is_insn_relative_long(kprobe_opcode_t *insn)
+{
+	/* Check if we have a RIL-b or RIL-c format instruction which
+	 * we need to modify in order to avoid instruction emulation. */
+	switch (insn[0] >> 8) {
+	case 0xc0:
+		if ((insn[0] & 0x0f) == 0x00) /* larl */
+			return true;
+		break;
+	case 0xc4:
+		switch (insn[0] & 0x0f) {
+		case 0x02: /* llhrl */
+		case 0x04: /* lghrl */
+		case 0x05: /* lhrl */
+		case 0x06: /* llghrl */
+		case 0x07: /* sthrl */
+		case 0x08: /* lgrl */
+		case 0x0b: /* stgrl */
+		case 0x0c: /* lgfrl */
+		case 0x0d: /* lrl */
+		case 0x0e: /* llgfrl */
+		case 0x0f: /* strl */
+			return true;
+		}
+		break;
+	case 0xc6:
+		switch (insn[0] & 0x0f) {
+		case 0x00: /* exrl */
+		case 0x02: /* pfdrl */
+		case 0x04: /* cghrl */
+		case 0x05: /* chrl */
+		case 0x06: /* clghrl */
+		case 0x07: /* clhrl */
+		case 0x08: /* cgrl */
+		case 0x0a: /* clgrl */
+		case 0x0c: /* cgfrl */
+		case 0x0d: /* crl */
+		case 0x0e: /* clgfrl */
+		case 0x0f: /* clrl */
+			return true;
+		}
+		break;
+	}
+	return false;
+}
+
+static void __kprobes copy_instruction(struct kprobe *p)
+{
+	s64 disp, new_disp;
+	u64 addr, new_addr;
+
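+	/*
+	 * The two most significant bits of the opcode encode the instruction
+	 * length: 00 -> 2, 01/10 -> 4, 11 -> 6 bytes. Hence the expression
+	 * ((p->opcode >> 14) + 3) & -2 below yields the length in bytes.
+	 */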
+	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+	if (!is_insn_relative_long(p->ainsn.insn))
+		return;
+	/*
+	 * For pc-relative instructions in RIL-b or RIL-c format patch the
+	 * RI2 displacement field. We have already made sure that the insn
+	 * slot for the patched instruction is within the same 2GB area
+	 * as the original instruction (either kernel image or module area).
+	 * Therefore the new displacement will always fit.
+	 */
+	disp = *(s32 *)&p->ainsn.insn[1];
+	addr = (u64)(unsigned long)p->addr;
+	new_addr = (u64)(unsigned long)p->ainsn.insn;
+	new_disp = ((addr + (disp * 2)) - new_addr) / 2;
+	*(s32 *)&p->ainsn.insn[1] = new_disp;
+}
+
+static inline int is_kernel_addr(void *addr)
+{
+	return addr < (void *)_end;
+}
+
+static inline int is_module_addr(void *addr)
+{
+#ifdef CONFIG_64BIT
+	BUILD_BUG_ON(MODULES_LEN > (1UL << 31));
+	if (addr < (void *)MODULES_VADDR)
+		return 0;
+	if (addr > (void *)MODULES_END)
+		return 0;
+#endif
+	return 1;
+}
+
+static int __kprobes s390_get_insn_slot(struct kprobe *p)
+{
+	/*
+	 * Get an insn slot that is within the same 2GB area like the original
+	 * instruction. That way instructions with a 32bit signed displacement
+	 * field can be patched and executed within the insn slot.
+	 */
+	p->ainsn.insn = NULL;
+	if (is_kernel_addr(p->addr))
+		p->ainsn.insn = get_dmainsn_slot();
+	if (is_module_addr(p->addr))
+		p->ainsn.insn = get_insn_slot();
+	return p->ainsn.insn ? 0 : -ENOMEM;
+}
+
+static void __kprobes s390_free_insn_slot(struct kprobe *p)
+{
+	if (!p->ainsn.insn)
+		return;
+	if (is_kernel_addr(p->addr))
+		free_dmainsn_slot(p->ainsn.insn, 0);
+	else
+		free_insn_slot(p->ainsn.insn, 0);
+	p->ainsn.insn = NULL;
+}
+
 int __kprobes arch_prepare_kprobe(struct kprobe *p)
 {
 	if ((unsigned long) p->addr & 0x01)
 		return -EINVAL;
 	/* Make sure the probe isn't going on a difficult instruction */
 	if (is_prohibited_opcode(p->addr))
 		return -EINVAL;
+	if (s390_get_insn_slot(p))
+		return -ENOMEM;
 	p->opcode = *p->addr;
-	memcpy(p->ainsn.insn, p->addr, ((p->opcode >> 14) + 3) & -2);
+	copy_instruction(p);
 	return 0;
 }
@@ -186,6 +315,7 @@ void __kprobes arch_disarm_kprobe(struct kprobe *p)
 
 void __kprobes arch_remove_kprobe(struct kprobe *p)
 {
+	s390_free_insn_slot(p);
 }
 
 static void __kprobes enable_singlestep(struct kprobe_ctlblk *kcb,
...