Le 11/02/2020 à 06:33, Jordan Niethe a écrit :
A prefixed instruction is composed of a word prefix followed by a word
suffix. It does not make sense to be able to have a kprobe on the suffix
of a prefixed instruction, so make this impossible.
Kprobes work by replacing an instruction with a trap and saving that
instruction to be single stepped out of place later. Currently there is
not enough space allocated to keep a prefixed instruction for single
stepping. Increase the amount of space allocated for holding the
instruction copy.
kprobe_post_handler() expects all instructions to be 4 bytes long, which
means that it does not function correctly for prefixed instructions.
Add checks for prefixed instructions, which will use a length of 8 bytes
instead.
For optprobes we normally patch in the loading of the probed instruction
into r4 before calling emulate_step(). We now make space and patch in
the loading of the suffix into r5 as well.
Signed-off-by: Jordan Niethe <jniet...@gmail.com>
---
arch/powerpc/include/asm/kprobes.h | 5 +--
arch/powerpc/kernel/kprobes.c | 47 +++++++++++++++++++++-------
arch/powerpc/kernel/optprobes.c | 32 ++++++++++---------
arch/powerpc/kernel/optprobes_head.S | 6 ++++
4 files changed, 63 insertions(+), 27 deletions(-)
diff --git a/arch/powerpc/include/asm/kprobes.h
b/arch/powerpc/include/asm/kprobes.h
index 66b3f2983b22..0d44ce8a3163 100644
--- a/arch/powerpc/include/asm/kprobes.h
+++ b/arch/powerpc/include/asm/kprobes.h
@@ -38,12 +38,13 @@ extern kprobe_opcode_t optprobe_template_entry[];
extern kprobe_opcode_t optprobe_template_op_address[];
extern kprobe_opcode_t optprobe_template_call_handler[];
extern kprobe_opcode_t optprobe_template_insn[];
+extern kprobe_opcode_t optprobe_template_suffix[];
extern kprobe_opcode_t optprobe_template_call_emulate[];
extern kprobe_opcode_t optprobe_template_ret[];
extern kprobe_opcode_t optprobe_template_end[];
-/* Fixed instruction size for powerpc */
-#define MAX_INSN_SIZE 1
+/* Prefixed instructions are two words */
+#define MAX_INSN_SIZE 2
#define MAX_OPTIMIZED_LENGTH sizeof(kprobe_opcode_t) /* 4 bytes */
#define MAX_OPTINSN_SIZE (optprobe_template_end -
optprobe_template_entry)
#define RELATIVEJUMP_SIZE sizeof(kprobe_opcode_t) /* 4 bytes */
diff --git a/arch/powerpc/kernel/kprobes.c b/arch/powerpc/kernel/kprobes.c
index 24a56f062d9e..b061deba4fe7 100644
--- a/arch/powerpc/kernel/kprobes.c
+++ b/arch/powerpc/kernel/kprobes.c
@@ -104,17 +104,30 @@ kprobe_opcode_t *kprobe_lookup_name(const char *name,
unsigned int offset)
int arch_prepare_kprobe(struct kprobe *p)
{
+ int len;
int ret = 0;
+ struct kprobe *prev;
kprobe_opcode_t insn = *p->addr;
+ kprobe_opcode_t prefix = *(p->addr - 1);
+ preempt_disable();
if ((unsigned long)p->addr & 0x03) {
printk("Attempt to register kprobe at an unaligned address\n");
ret = -EINVAL;
} else if (IS_MTMSRD(insn) || IS_RFID(insn) || IS_RFI(insn)) {
printk("Cannot register a kprobe on rfi/rfid or mtmsr[d]\n");
ret = -EINVAL;
+ } else if (IS_PREFIX(prefix)) {
+ printk("Cannot register a kprobe on the second word of prefixed
instruction\n");
+ ret = -EINVAL;
+ }
+ prev = get_kprobe(p->addr - 1);
+ if (prev && IS_PREFIX(*prev->ainsn.insn)) {
+ printk("Cannot register a kprobe on the second word of prefixed
instruction\n");
+ ret = -EINVAL;
}
+
/* insn must be on a special executable page on ppc64. This is
* not explicitly required on ppc32 (right now), but it doesn't hurt */
if (!ret) {
@@ -124,14 +137,18 @@ int arch_prepare_kprobe(struct kprobe *p)
}
if (!ret) {
- memcpy(p->ainsn.insn, p->addr,
- MAX_INSN_SIZE * sizeof(kprobe_opcode_t));
+ if (IS_PREFIX(insn))
+ len = MAX_INSN_SIZE * sizeof(kprobe_opcode_t);
+ else
+ len = sizeof(kprobe_opcode_t);
+ memcpy(p->ainsn.insn, p->addr, len);
This code is about to get changed, see
https://patchwork.ozlabs.org/patch/1232619/
p->opcode = *p->addr;
flush_icache_range((unsigned long)p->ainsn.insn,
(unsigned long)p->ainsn.insn + sizeof(kprobe_opcode_t));
}
p->ainsn.boostable = 0;
+ preempt_enable_no_resched();
return ret;
}
NOKPROBE_SYMBOL(arch_prepare_kprobe);
@@ -216,10 +233,11 @@ NOKPROBE_SYMBOL(arch_prepare_kretprobe);
static int try_to_emulate(struct kprobe *p, struct pt_regs *regs)
{
int ret;
- unsigned int insn = *p->ainsn.insn;
+ unsigned int insn = p->ainsn.insn[0];
+ unsigned int suffix = p->ainsn.insn[1];
/* regs->nip is also adjusted if emulate_step returns 1 */
- ret = emulate_step(regs, insn, PPC_NO_SUFFIX);
+ ret = emulate_step(regs, insn, suffix);
if (ret > 0) {
/*
* Once this instruction has been boosted
@@ -233,7 +251,11 @@ static int try_to_emulate(struct kprobe *p, struct pt_regs
*regs)
* So, we should never get here... but, its still
* good to catch them, just in case...
*/
- printk("Can't step on instruction %x\n", insn);
+ if (!IS_PREFIX(insn))
+ printk("Can't step on instruction %x\n", insn);
+ else
+ printk("Can't step on instruction %x %x\n", insn,
+ suffix);
Maybe %x:%x as in xmon?
Christophe