From: Andi Kleen <a...@linux.intel.com> With LBRv5, reading the extra LBR flags like mispredict, TSX, cycles is not free anymore, as they have moved to a separate MSR.
For callstack mode we don't need any of this information; so we can avoid the unnecessary MSR read. Add flags to the perf interface where perf record can request not collecting this information. I added sample_type flags for CYCLES and FLAGS. It's a bit unusual for sample_types to be negative (disable), not positive (enable), but since the legacy ABI reported the flags we need some form of explicit disabling to avoid breaking the ABI. In theory it would be possible to make CYCLES opt-in (as it's not deployed yet), but I also made it opt-out to be symmetric to FLAGS. After we have the flags the x86 perf code can keep track if any users need the flags. If no one needs it the information is not collected. Signed-off-by: Andi Kleen <a...@linux.intel.com> --- arch/x86/kernel/cpu/perf_event.h | 2 ++ arch/x86/kernel/cpu/perf_event_intel_lbr.c | 49 ++++++++++++++++++++++-------- include/uapi/linux/perf_event.h | 2 ++ 3 files changed, 40 insertions(+), 13 deletions(-) diff --git a/arch/x86/kernel/cpu/perf_event.h b/arch/x86/kernel/cpu/perf_event.h index 2860b89..c83bf07 100644 --- a/arch/x86/kernel/cpu/perf_event.h +++ b/arch/x86/kernel/cpu/perf_event.h @@ -201,6 +201,8 @@ struct cpu_hw_events { * Intel LBR bits */ int lbr_users; + int lbr_flags_users; + int lbr_cycles_users; void *lbr_context; struct perf_branch_stack lbr_stack; struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES]; diff --git a/arch/x86/kernel/cpu/perf_event_intel_lbr.c b/arch/x86/kernel/cpu/perf_event_intel_lbr.c index 1fd8b5a..5de2048 100644 --- a/arch/x86/kernel/cpu/perf_event_intel_lbr.c +++ b/arch/x86/kernel/cpu/perf_event_intel_lbr.c @@ -340,6 +340,10 @@ void intel_pmu_lbr_enable(struct perf_event *event) } cpuc->lbr_users++; + if (!(event->attr.sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS)) + cpuc->lbr_flags_users++; + if (!(event->attr.sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES)) + cpuc->lbr_cycles_users++; perf_sched_cb_inc(event->ctx->pmu); } @@ -358,7 +362,14 @@ void intel_pmu_lbr_disable(struct 
perf_event *event) } cpuc->lbr_users--; - WARN_ON_ONCE(cpuc->lbr_users < 0); + if (!(event->attr.sample_type & PERF_SAMPLE_BRANCH_NO_FLAGS)) + cpuc->lbr_flags_users--; + if (!(event->attr.sample_type & PERF_SAMPLE_BRANCH_NO_CYCLES)) + cpuc->lbr_cycles_users--; + + WARN_ON_ONCE(cpuc->lbr_users < 0 || + cpuc->lbr_flags_users < 0 || + cpuc->lbr_cycles_users < 0); perf_sched_cb_dec(event->ctx->pmu); if (cpuc->enabled && !cpuc->lbr_users) { @@ -416,7 +427,9 @@ static void intel_pmu_lbr_read_32(struct cpu_hw_events *cpuc) * is the same as the linear address, allowing us to merge the LIP and EIP * LBR formats. */ -static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) +static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc, + bool need_flags, + bool need_cycles) { unsigned long mask = x86_pmu.lbr_nr - 1; int lbr_format = x86_pmu.intel_cap.lbr_format; @@ -434,24 +447,32 @@ static void intel_pmu_lbr_read_64(struct cpu_hw_events *cpuc) rdmsrl(x86_pmu.lbr_from + lbr_idx, from); rdmsrl(x86_pmu.lbr_to + lbr_idx, to); - if (lbr_format == LBR_FORMAT_INFO) { + if (lbr_format == LBR_FORMAT_INFO && + (need_flags || need_cycles)) { u64 info; rdmsrl(MSR_LBR_INFO_0 + lbr_idx, info); - mis = !!(info & LBR_INFO_MISPRED); - pred = !mis; - in_tx = !!(info & LBR_INFO_IN_TX); - abort = !!(info & LBR_INFO_ABORT); - cycles = (info & LBR_INFO_CYCLES); + if (need_flags) { + mis = !!(info & LBR_INFO_MISPRED); + pred = !mis; + in_tx = !!(info & LBR_INFO_IN_TX); + abort = !!(info & LBR_INFO_ABORT); + } + if (need_cycles) + cycles = (info & LBR_INFO_CYCLES); } if (lbr_flags & LBR_EIP_FLAGS) { - mis = !!(from & LBR_FROM_FLAG_MISPRED); - pred = !mis; + if (need_flags) { + mis = !!(from & LBR_FROM_FLAG_MISPRED); + pred = !mis; + } skip = 1; } if (lbr_flags & LBR_TSX) { - in_tx = !!(from & LBR_FROM_FLAG_IN_TX); - abort = !!(from & LBR_FROM_FLAG_ABORT); + if (need_flags) { + in_tx = !!(from & LBR_FROM_FLAG_IN_TX); + abort = !!(from & LBR_FROM_FLAG_ABORT); + } skip = 3; } from = 
(u64)((((s64)from) << skip) >> skip); @@ -490,7 +511,9 @@ void intel_pmu_lbr_read(void) if (x86_pmu.intel_cap.lbr_format == LBR_FORMAT_32) intel_pmu_lbr_read_32(cpuc); else - intel_pmu_lbr_read_64(cpuc); + intel_pmu_lbr_read_64(cpuc, + cpuc->lbr_flags_users > 0, + cpuc->lbr_cycles_users > 0); intel_pmu_lbr_filter(cpuc); } diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h index 1b8bd4a..8dd5765 100644 --- a/include/uapi/linux/perf_event.h +++ b/include/uapi/linux/perf_event.h @@ -138,6 +138,8 @@ enum perf_event_sample_format { PERF_SAMPLE_IDENTIFIER = 1U << 16, PERF_SAMPLE_TRANSACTION = 1U << 17, PERF_SAMPLE_REGS_INTR = 1U << 18, + PERF_SAMPLE_BRANCH_NO_FLAGS = 1U << 19, + PERF_SAMPLE_BRANCH_NO_CYCLES = 1U << 20, - PERF_SAMPLE_MAX = 1U << 19, /* non-ABI */ + PERF_SAMPLE_MAX = 1U << 21, /* non-ABI */ }; -- 2.1.0 -- To unsubscribe from this list: send the line "unsubscribe linux-kernel" in the body of a message to majord...@vger.kernel.org More majordomo info at http://vger.kernel.org/majordomo-info.html Please read the FAQ at http://www.tux.org/lkml/