The last remaining user of tlb_debug() is the set page code. We can convert this to trace events and remove the associated logging machinery. The DEBUG_TLB define remains so the self-check code can still be enabled when debugging.
To avoid overly long lines in trace events I've split the event into
tlb_set_page and tlb_set_page_attrs.

Signed-off-by: Alex Bennée <alex.ben...@linaro.org>
---
 accel/tcg/cputlb.c     | 27 +++++----------------------
 accel/tcg/trace-events |  5 +++++
 2 files changed, 10 insertions(+), 22 deletions(-)

diff --git a/accel/tcg/cputlb.c b/accel/tcg/cputlb.c
index f1d4f7da44..abdc4c5d25 100644
--- a/accel/tcg/cputlb.c
+++ b/accel/tcg/cputlb.c
@@ -34,31 +34,16 @@
 #include "exec/helper-proto.h"
 #include "qemu/atomic.h"
 
-/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
+/* DEBUG_TLB turns on internal self-checks, actions are logged via
+ * trace-events */
 /* #define DEBUG_TLB */
-/* #define DEBUG_TLB_LOG */
 
 #ifdef DEBUG_TLB
 # define DEBUG_TLB_GATE 1
-# ifdef DEBUG_TLB_LOG
-#  define DEBUG_TLB_LOG_GATE 1
-# else
-#  define DEBUG_TLB_LOG_GATE 0
-# endif
 #else
 # define DEBUG_TLB_GATE 0
-# define DEBUG_TLB_LOG_GATE 0
 #endif
 
-#define tlb_debug(fmt, ...) do { \
-    if (DEBUG_TLB_LOG_GATE) { \
-        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
-                      ## __VA_ARGS__); \
-    } else if (DEBUG_TLB_GATE) { \
-        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
-    } \
-} while (0)
-
 #define assert_cpu_is_self(this_cpu) do {                         \
         if (DEBUG_TLB_GATE) {                                     \
             g_assert(!cpu->created || qemu_cpu_is_self(cpu));     \
@@ -124,7 +109,6 @@ static void tlb_flush_nocheck(CPUState *cpu)
 
     assert_cpu_is_self(cpu);
     atomic_set(&env->tlb_flush_count, env->tlb_flush_count + 1);
-    tlb_debug("(count: %zu)\n", tlb_flush_count());
 
     memset(env->tlb_table, -1, sizeof(env->tlb_table));
     memset(env->tlb_v_table, -1, sizeof(env->tlb_v_table));
@@ -629,6 +613,9 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
 
     assert_cpu_is_self(cpu);
 
+    trace_tlb_set_page(cpu->cpu_index, vaddr, paddr);
+    trace_tlb_set_page_attrs(*(unsigned int *) &attrs, prot, mmu_idx, size);
+
     if (size < TARGET_PAGE_SIZE) {
         sz = TARGET_PAGE_SIZE;
     } else {
@@ -644,10 +631,6 @@ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                                              &xlat, &sz, attrs, &prot);
     assert(sz >= TARGET_PAGE_SIZE);
 
-    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
-              " prot=%x idx=%d\n",
-              vaddr, paddr, prot, mmu_idx);
-
     address = vaddr_page;
     if (size < TARGET_PAGE_SIZE) {
         /*
diff --git a/accel/tcg/trace-events b/accel/tcg/trace-events
index 0d2b1c47ac..82705632f2 100644
--- a/accel/tcg/trace-events
+++ b/accel/tcg/trace-events
@@ -15,6 +15,11 @@ tlb_flush_synced_schedule(int line, int from, int to) "cputlb.c:%d from_cpu=%d t
 tlb_flush_all_work(int vcpu) "cpu %d"
 tlb_flush_work(int line, int vcpu, unsigned long data) "cputlb.c:%d cpu %d, %lux"
 tlb_flush_work_complete(int line, int vcpu) "cputlb.c:%d cpu %d"
+#
+# TLB entries
+#
+tlb_set_page(int vcpu, unsigned long vaddr, unsigned long paddr) "cpu:%d vaddr:0x%lx paddr:0x%lx"
+tlb_set_page_attrs(unsigned int attrs, int prot, int mmu_idx, int size) "attrs:0x%x prot:0x%x mmu_idx:%d size:%d"
 
 # TCG related tracing (you still need -d nochain to get a full picture
 # as otherwise you'll only see the first TB executed in a chain)
-- 
2.17.1
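
[Usage note, not part of the patch: a minimal sketch of how the new trace
points could be enabled at runtime, assuming the standard QEMU trace options.
The binary, machine and exact option spellings below are illustrative and
should be checked against "-trace help" / "-d help" for your build.]

  # enable both new events via the generic -trace option
  # (glob matching over event names is assumed to behave as for the
  #  existing tlb_flush_* events)
  qemu-system-aarch64 -M virt ... -trace "tlb_set_page*"

  # or, if QEMU was configured with the "log" trace backend
  qemu-system-aarch64 -M virt ... -d trace:tlb_set_page,trace:tlb_set_page_attrs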