Extend the plugin API with three new functions:
qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline_per_vcpu().
Compared to the non per_vcpu versions, ptr is now a base address; the
current cpu_index and an offset are used to compute the memory location
the operation acts on (ptr + cpu_index * offset). This provides a
thread-safe variant of inline operations. A flexible offset is useful
when a user wants to target a memory location embedded in a struct; in
that case, the stride between two per-vcpu locations is larger than
sizeof(uint64_t). A usage sketch follows the patch below.

Signed-off-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>
---
 include/qemu/qemu-plugin.h   | 56 +++++++++++++++++++++++++++++++++++-
 plugins/api.c                | 36 ++++++++++++++++++++---
 plugins/qemu-plugins.symbols |  3 ++
 3 files changed, 90 insertions(+), 5 deletions(-)

diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h
index 4daab6efd29..8a0691a760e 100644
--- a/include/qemu/qemu-plugin.h
+++ b/include/qemu/qemu-plugin.h
@@ -312,6 +312,25 @@ void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
                                               enum qemu_plugin_op op,
                                               void *ptr, uint64_t imm);
 
+/**
+ * qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu() - execution inline op
+ * @tb: the opaque qemu_plugin_tb handle for the translation
+ * @op: the type of qemu_plugin_op (e.g. ADD_U64)
+ * @ptr: pointer to the base memory location for the op
+ * @offset: offset, in bytes, between two per-vcpu memory locations
+ * @imm: the op data (e.g. 1)
+ *
+ * Insert an inline op, on a memory location associated with a given
+ * vcpu (whose address is ptr + offset * cpu_index),
+ * every time a translated unit executes.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+    struct qemu_plugin_tb *tb,
+    enum qemu_plugin_op op,
+    void *ptr, size_t offset,
+    uint64_t imm);
+
 /**
  * qemu_plugin_register_vcpu_insn_exec_cb() - register insn execution cb
  * @insn: the opaque qemu_plugin_insn handle for an instruction
@@ -342,6 +361,23 @@ void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn,
                                                 enum qemu_plugin_op op,
                                                 void *ptr, uint64_t imm);
 
+/**
+ * qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu() - insn exec inline op
+ * @insn: the opaque qemu_plugin_insn handle for an instruction
+ * @op: the type of qemu_plugin_op (e.g. ADD_U64)
+ * @ptr: pointer to an array of per-vcpu memory locations for the op
+ * @offset: offset, in bytes, between two per-vcpu memory locations
+ * @imm: the op data (e.g. 1)
+ *
+ * Insert an inline op every time an instruction executes.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+    struct qemu_plugin_insn *insn,
+    enum qemu_plugin_op op,
+    void *ptr, size_t offset,
+    uint64_t imm);
+
 /**
  * qemu_plugin_tb_n_insns() - query helper for number of insns in TB
  * @tb: opaque handle to TB passed to callback
@@ -567,7 +603,25 @@ void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
                                           enum qemu_plugin_op op,
                                           void *ptr, uint64_t imm);
 
-
+/**
+ * qemu_plugin_register_vcpu_mem_inline_per_vcpu() - inline op for mem access
+ * @insn: handle for instruction to instrument
+ * @rw: apply to reads, writes or both
+ * @op: the op, of type qemu_plugin_op
+ * @ptr: pointer to an array of per-vcpu memory locations for the op
+ * @offset: offset, in bytes, between two per-vcpu memory locations
+ * @imm: immediate data for @op
+ *
+ * This registers an inline op for every memory access generated by
+ * the instruction.
+ */
+QEMU_PLUGIN_API
+void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+    struct qemu_plugin_insn *insn,
+    enum qemu_plugin_mem_rw rw,
+    enum qemu_plugin_op op,
+    void *ptr, size_t offset,
+    uint64_t imm);
 
 typedef void (*qemu_plugin_vcpu_syscall_cb_t)(qemu_plugin_id_t id,
                                               unsigned int vcpu_index,
diff --git a/plugins/api.c b/plugins/api.c
index 0fcce825680..fd6ce678501 100644
--- a/plugins/api.c
+++ b/plugins/api.c
@@ -97,10 +97,19 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
 void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
                                               enum qemu_plugin_op op,
                                               void *ptr, uint64_t imm)
+{
+    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(tb, op, ptr, 0, imm);
+}
+
+void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
+    struct qemu_plugin_tb *tb,
+    enum qemu_plugin_op op,
+    void *ptr, size_t offset,
+    uint64_t imm)
 {
     if (!tb->mem_only) {
         plugin_register_inline_op(&tb->cbs[PLUGIN_CB_INLINE],
-                                  0, op, ptr, 0, imm);
+                                  0, op, ptr, offset, imm);
     }
 }
 
@@ -118,10 +127,19 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
 void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn,
                                                 enum qemu_plugin_op op,
                                                 void *ptr, uint64_t imm)
+{
+    qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(insn, op, ptr, 0, imm);
+}
+
+void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
+    struct qemu_plugin_insn *insn,
+    enum qemu_plugin_op op,
+    void *ptr, size_t offset,
+    uint64_t imm)
 {
     if (!insn->mem_only) {
         plugin_register_inline_op(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
-                                  0, op, ptr, 0, imm);
+                                  0, op, ptr, offset, imm);
     }
 }
 
@@ -137,16 +155,26 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
                                       void *udata)
 {
     plugin_register_vcpu_mem_cb(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_REGULAR],
-                                    cb, flags, rw, udata);
+                                cb, flags, rw, udata);
 }
 
 void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
                                           enum qemu_plugin_mem_rw rw,
                                           enum qemu_plugin_op op, void *ptr,
                                           uint64_t imm)
+{
+    qemu_plugin_register_vcpu_mem_inline_per_vcpu(insn, rw, op, ptr, 0, imm);
+}
+
+void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
+    struct qemu_plugin_insn *insn,
+    enum qemu_plugin_mem_rw rw,
+    enum qemu_plugin_op op,
+    void *ptr, size_t offset,
+    uint64_t imm)
 {
     plugin_register_inline_op(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE],
-                              rw, op, ptr, 0, imm);
+                              rw, op, ptr, offset, imm);
 }
 
 void qemu_plugin_register_vcpu_tb_trans_cb(qemu_plugin_id_t id,
diff --git a/plugins/qemu-plugins.symbols b/plugins/qemu-plugins.symbols
index 71f6c90549d..56ba30e5a81 100644
--- a/plugins/qemu-plugins.symbols
+++ b/plugins/qemu-plugins.symbols
@@ -27,13 +27,16 @@
   qemu_plugin_register_vcpu_init_cb;
   qemu_plugin_register_vcpu_insn_exec_cb;
   qemu_plugin_register_vcpu_insn_exec_inline;
+  qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu;
   qemu_plugin_register_vcpu_mem_cb;
   qemu_plugin_register_vcpu_mem_inline;
+  qemu_plugin_register_vcpu_mem_inline_per_vcpu;
   qemu_plugin_register_vcpu_resume_cb;
   qemu_plugin_register_vcpu_syscall_cb;
   qemu_plugin_register_vcpu_syscall_ret_cb;
   qemu_plugin_register_vcpu_tb_exec_cb;
   qemu_plugin_register_vcpu_tb_exec_inline;
+  qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu;
   qemu_plugin_register_vcpu_tb_trans_cb;
   qemu_plugin_reset;
   qemu_plugin_start_code;
-- 
2.43.0
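
For illustration, a plugin could use the proposed API to count executed
translation blocks per vcpu roughly as follows. This is a minimal sketch
and not part of the patch; the MAX_VCPUS bound and the counts[] array are
assumptions made only for the example.

/* Hedged usage sketch, not part of the patch: count executed translation
 * blocks per vcpu with the proposed per-vcpu inline op. MAX_VCPUS and the
 * counts[] array are assumptions made for this example. */
#include <inttypes.h>
#include <stdio.h>
#include <qemu-plugin.h>

QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

#define MAX_VCPUS 64                 /* assumed upper bound for this sketch */
static uint64_t counts[MAX_VCPUS];   /* one counter per vcpu */

static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
{
    /* Each vcpu increments counts[cpu_index]: base is counts, the stride
     * between two per-vcpu slots is sizeof(uint64_t), so no atomics or
     * locks are needed. */
    qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
        tb, QEMU_PLUGIN_INLINE_ADD_U64, counts, sizeof(uint64_t), 1);
}

static void plugin_exit(qemu_plugin_id_t id, void *p)
{
    for (int i = 0; i < MAX_VCPUS; i++) {
        if (counts[i]) {
            fprintf(stderr, "vcpu %d: %" PRIu64 " tbs\n", i, counts[i]);
        }
    }
}

QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                           const qemu_info_t *info,
                                           int argc, char **argv)
{
    qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
    qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
    return 0;
}

If the counter were a field of a larger per-vcpu struct, the offset
argument would be the size of that struct rather than sizeof(uint64_t),
which is why the offset is kept flexible.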