Pierrick Bouvier <pierrick.bouv...@linaro.org> writes:

> Now we have a thread-safe equivalent of inline operation, and that all
> plugins were changed to use it, there is no point to keep the old API.
>
> In more, it will help when we implement more functionality (conditional
> callbacks), as we can assume that we operate on a scoreboard.
>
> Bump API version as it's a breaking change for existing plugins.
>
> Signed-off-by: Pierrick Bouvier <pierrick.bouv...@linaro.org>
> ---
>  include/qemu/qemu-plugin.h | 59 ++++----------------------------------
>  plugins/api.c              | 29 -------------------
>  2 files changed, 6 insertions(+), 82 deletions(-)
>
> diff --git a/include/qemu/qemu-plugin.h b/include/qemu/qemu-plugin.h
> index 55f918db1b0..3ee514f79cf 100644
> --- a/include/qemu/qemu-plugin.h
> +++ b/include/qemu/qemu-plugin.h
> @@ -51,11 +51,16 @@ typedef uint64_t qemu_plugin_id_t;
>   *
>   * The plugins export the API they were built against by exposing the
>   * symbol qemu_plugin_version which can be checked.
> + *
> + * Version 2:
> + * Remove qemu_plugin_register_vcpu_{tb, insn, mem}_exec_inline.
> + * Those functions are replaced by *_per_vcpu variants, which guarantees
> + * thread-safety for operations.
>   */
>
>  extern QEMU_PLUGIN_EXPORT int qemu_plugin_version;
>
> -#define QEMU_PLUGIN_VERSION 1
> +#define QEMU_PLUGIN_VERSION 2
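For anyone carrying an out-of-tree plugin, the mechanical change is small.
A rough sketch of the migration for a simple TB counter, assuming the
scoreboard helpers introduced earlier in this series
(qemu_plugin_scoreboard_new/_free, qemu_plugin_scoreboard_u64,
qemu_plugin_u64_sum -- worth double-checking the names against the final
header):

  #include <glib.h>
  #include <inttypes.h>
  #include <qemu-plugin.h>

  QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;

  /* v1 kept a single shared uint64_t and passed &counter to
   * qemu_plugin_register_vcpu_tb_exec_inline(); v2 gives each vCPU its
   * own slot in a scoreboard instead. */
  static struct qemu_plugin_scoreboard *counts;

  static void vcpu_tb_trans(qemu_plugin_id_t id, struct qemu_plugin_tb *tb)
  {
      /* inline add now targets this vCPU's scoreboard entry */
      qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
          tb, QEMU_PLUGIN_INLINE_ADD_U64,
          qemu_plugin_scoreboard_u64(counts), 1);
  }

  static void plugin_exit(qemu_plugin_id_t id, void *p)
  {
      /* sum the per-vCPU slots when the total is actually needed */
      g_autofree gchar *msg = g_strdup_printf("tbs executed: %" PRIu64 "\n",
          qemu_plugin_u64_sum(qemu_plugin_scoreboard_u64(counts)));
      qemu_plugin_outs(msg);
      qemu_plugin_scoreboard_free(counts);
  }

  QEMU_PLUGIN_EXPORT int qemu_plugin_install(qemu_plugin_id_t id,
                                             const qemu_info_t *info,
                                             int argc, char **argv)
  {
      counts = qemu_plugin_scoreboard_new(sizeof(uint64_t));
      qemu_plugin_register_vcpu_tb_trans_cb(id, vcpu_tb_trans);
      qemu_plugin_register_atexit_cb(id, plugin_exit, NULL);
      return 0;
  }

Plugins keep exporting the API version they were built against
(QEMU_PLUGIN_EXPORT int qemu_plugin_version = QEMU_PLUGIN_VERSION;), so the
remaining question is which of the two version constants this change should
move: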
I think technically adding the new API is what bumps QEMU_PLUGIN_VERSION,
while deprecating the old API should bump QEMU_PLUGIN_MIN_VERSION to the
same value.

>
>  /**
>   * struct qemu_info_t - system information for plugins
> @@ -311,25 +316,6 @@ enum qemu_plugin_op {
>      QEMU_PLUGIN_INLINE_ADD_U64,
>  };
>
> -/**
> - * qemu_plugin_register_vcpu_tb_exec_inline() - execution inline op
> - * @tb: the opaque qemu_plugin_tb handle for the translation
> - * @op: the type of qemu_plugin_op (e.g. ADD_U64)
> - * @ptr: the target memory location for the op
> - * @imm: the op data (e.g. 1)
> - *
> - * Insert an inline op to every time a translated unit executes.
> - * Useful if you just want to increment a single counter somewhere in
> - * memory.
> - *
> - * Note: ops are not atomic so in multi-threaded/multi-smp situations
> - * you will get inexact results.
> - */
> -QEMU_PLUGIN_API
> -void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
> -                                              enum qemu_plugin_op op,
> -                                              void *ptr, uint64_t imm);
> -
>  /**
>   * qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu() - execution inline op
>   * @tb: the opaque qemu_plugin_tb handle for the translation
> @@ -361,21 +347,6 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
>                                              enum qemu_plugin_cb_flags flags,
>                                              void *userdata);
>
> -/**
> - * qemu_plugin_register_vcpu_insn_exec_inline() - insn execution inline op
> - * @insn: the opaque qemu_plugin_insn handle for an instruction
> - * @op: the type of qemu_plugin_op (e.g. ADD_U64)
> - * @ptr: the target memory location for the op
> - * @imm: the op data (e.g. 1)
> - *
> - * Insert an inline op to every time an instruction executes. Useful
> - * if you just want to increment a single counter somewhere in memory.
> - */
> -QEMU_PLUGIN_API
> -void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn,
> -                                                enum qemu_plugin_op op,
> -                                                void *ptr, uint64_t imm);
> -
>  /**
>   * qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu() - insn exec inline op
>   * @insn: the opaque qemu_plugin_insn handle for an instruction
> @@ -599,24 +570,6 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
>                                        enum qemu_plugin_mem_rw rw,
>                                        void *userdata);
>
> -/**
> - * qemu_plugin_register_vcpu_mem_inline() - register an inline op to any memory access
> - * @insn: handle for instruction to instrument
> - * @rw: apply to reads, writes or both
> - * @op: the op, of type qemu_plugin_op
> - * @ptr: pointer memory for the op
> - * @imm: immediate data for @op
> - *
> - * This registers a inline op every memory access generated by the
> - * instruction. This provides for a lightweight but not thread-safe
> - * way of counting the number of operations done.
> - */
> -QEMU_PLUGIN_API
> -void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
> -                                          enum qemu_plugin_mem_rw rw,
> -                                          enum qemu_plugin_op op, void *ptr,
> -                                          uint64_t imm);
> -
>  /**
>   * qemu_plugin_register_vcpu_mem_inline_per_vcpu() - inline op for mem access
>   * @insn: handle for instruction to instrument
> diff --git a/plugins/api.c b/plugins/api.c
> index 132d5e0bec1..29915d3c142 100644
> --- a/plugins/api.c
> +++ b/plugins/api.c
> @@ -101,16 +101,6 @@ void qemu_plugin_register_vcpu_tb_exec_cb(struct qemu_plugin_tb *tb,
>      }
>  }
>
> -void qemu_plugin_register_vcpu_tb_exec_inline(struct qemu_plugin_tb *tb,
> -                                              enum qemu_plugin_op op,
> -                                              void *ptr, uint64_t imm)
> -{
> -    if (!tb->mem_only) {
> -        plugin_register_inline_op(&tb->cbs[PLUGIN_CB_INLINE],
> -                                  0, op, ptr, 0, sizeof(uint64_t), true, imm);
> -    }
> -}
> -
>  void qemu_plugin_register_vcpu_tb_exec_inline_per_vcpu(
>      struct qemu_plugin_tb *tb,
>      enum qemu_plugin_op op,
> @@ -140,16 +130,6 @@ void qemu_plugin_register_vcpu_insn_exec_cb(struct qemu_plugin_insn *insn,
>      }
>  }
>
> -void qemu_plugin_register_vcpu_insn_exec_inline(struct qemu_plugin_insn *insn,
> -                                                enum qemu_plugin_op op,
> -                                                void *ptr, uint64_t imm)
> -{
> -    if (!insn->mem_only) {
> -        plugin_register_inline_op(&insn->cbs[PLUGIN_CB_INSN][PLUGIN_CB_INLINE],
> -                                  0, op, ptr, 0, sizeof(uint64_t), true, imm);
> -    }
> -}
> -
>  void qemu_plugin_register_vcpu_insn_exec_inline_per_vcpu(
>      struct qemu_plugin_insn *insn,
>      enum qemu_plugin_op op,
> @@ -179,15 +159,6 @@ void qemu_plugin_register_vcpu_mem_cb(struct qemu_plugin_insn *insn,
>                                        cb, flags, rw, udata);
>  }
>
> -void qemu_plugin_register_vcpu_mem_inline(struct qemu_plugin_insn *insn,
> -                                          enum qemu_plugin_mem_rw rw,
> -                                          enum qemu_plugin_op op, void *ptr,
> -                                          uint64_t imm)
> -{
> -    plugin_register_inline_op(&insn->cbs[PLUGIN_CB_MEM][PLUGIN_CB_INLINE],
> -                              rw, op, ptr, 0, sizeof(uint64_t), true, imm);
> -}
> -
>  void qemu_plugin_register_vcpu_mem_inline_per_vcpu(
>      struct qemu_plugin_insn *insn,
>      enum qemu_plugin_mem_rw rw,

-- 
Alex Bennée
Virtualisation Tech Lead @ Linaro