On 05/27/2016 09:01 AM, Lluís Vilanova wrote:
-void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+static inline void do_tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx,
+                                          TCGMemOp memop)
 {
     memop = tcg_canonicalize_memop(memop, 0, 0);
     gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
 }
-void tcg_gen_qemu_st_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
+{
+    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+                               addr, trace_mem_get_info(memop, 0));
+    do_tcg_gen_qemu_ld_i32(val, addr, idx, memop);
+}
...
 void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
 {
+    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
+                               addr, trace_mem_get_info(memop, 0));
+
     if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
-        tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
+        do_tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
         if (memop & MO_SIGN) {
             tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
         } else {
I think the better solution here is to move the tracing for 64-bit operations
below this IF, rather than fiddling around with inline functions et al.
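Roughly something like the following (untested sketch; the zero-extend and
early return in the 32-bit branch, and the gen_ldst_i64 tail, are recalled
from the current tcg-op.c rather than taken from the hunk above):

void tcg_gen_qemu_ld_i32(TCGv_i32 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 0));
    memop = tcg_canonicalize_memop(memop, 0, 0);
    gen_ldst_i32(INDEX_op_qemu_ld_i32, val, addr, memop, idx);
}

void tcg_gen_qemu_ld_i64(TCGv_i64 val, TCGv addr, TCGArg idx, TCGMemOp memop)
{
    if (TCG_TARGET_REG_BITS == 32 && (memop & MO_SIZE) < MO_64) {
        /* This path delegates to tcg_gen_qemu_ld_i32, which now emits
           the trace event itself, so tracing here would be redundant.  */
        tcg_gen_qemu_ld_i32(TCGV_LOW(val), addr, idx, memop);
        if (memop & MO_SIGN) {
            tcg_gen_sari_i32(TCGV_HIGH(val), TCGV_LOW(val), 31);
        } else {
            tcg_gen_movi_i32(TCGV_HIGH(val), 0);
        }
        return;
    }

    trace_guest_mem_before_tcg(tcg_ctx.cpu, tcg_ctx.tcg_env,
                               addr, trace_mem_get_info(memop, 0));
    memop = tcg_canonicalize_memop(memop, 1, 0);
    gen_ldst_i64(INDEX_op_qemu_ld_i64, val, addr, memop, idx);
}

That keeps one public entry point per access width, traces each guest access
exactly once, and avoids the extra do_* wrappers entirely.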
r~