> On Tue, Nov 29, 2016 at 07:07:25PM +0000, Liang, Kan wrote:
> > > diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
> > > index a74a2dbc0180..cb8522290e6a 100644
> > > --- a/arch/x86/events/intel/core.c
> > > +++ b/arch/x86/events/intel/core.c
> > > @@ -4034,7 +4034,7 @@ __init int intel_pmu_init(void)
> > >
> > >  	/* Support full width counters using alternative MSR range */
> > >  	if (x86_pmu.intel_cap.full_width_write) {
> > > -		x86_pmu.max_period = x86_pmu.cntval_mask;
> > > +		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
> > >  		x86_pmu.perfctr = MSR_IA32_PMC0;
> > >  		pr_cont("full-width counters, ");
> > >  	}
> >
> > It doesn't work.
> > perf stat -x, -C1 -e cycles -- sudo taskset 0x2 ./loop 100000000000
> > 18446743727217821696,,cycles,313837854019,100.00
> >
> > delta 0xffffff8000001803 new 0x1804 prev 0xffffff8000000001
> >
> > I guess we need at least x86_pmu.cntval_mask >> 2 to prevent the sign
> > flag from being set.
>
> Possibly delta should be u64, as we know the counter cannot decrement.
Yes, the patch below fixes the issue on my SLM.

diff --git a/arch/x86/events/core.c b/arch/x86/events/core.c
index 6c3b0ef..abd97e8 100644
--- a/arch/x86/events/core.c
+++ b/arch/x86/events/core.c
@@ -69,7 +69,7 @@ u64 x86_perf_event_update(struct perf_event *event)
 	int shift = 64 - x86_pmu.cntval_bits;
 	u64 prev_raw_count, new_raw_count;
 	int idx = hwc->idx;
-	s64 delta;
+	u64 delta;
 
 	if (idx == INTEL_PMC_IDX_FIXED_BTS)
 		return 0;
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index a74a2db..cb85222 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -4034,7 +4034,7 @@ __init int intel_pmu_init(void)
 
 	/* Support full width counters using alternative MSR range */
 	if (x86_pmu.intel_cap.full_width_write) {
-		x86_pmu.max_period = x86_pmu.cntval_mask;
+		x86_pmu.max_period = x86_pmu.cntval_mask >> 1;
 		x86_pmu.perfctr = MSR_IA32_PMC0;
 		pr_cont("full-width counters, ");
 	}
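
To spell out why the s64 delta goes wrong, here is a minimal userspace
sketch of the x86_perf_event_update() arithmetic. It is an illustration
only, not kernel code, and it assumes 40-bit counters (shift = 24),
since that is the width that reproduces the delta/new/prev values
quoted above. With a signed delta, the final right shift is arithmetic,
so once the counter wraps past the sign bit of its width the
reconstructed delta sign-extends into the huge negative value seen
above. With a u64 delta the shift is logical and the subtraction yields
the count modulo 2^40, which is correct because the counter can only
increment; halving max_period keeps the interval between updates below
half the counter period, so the modular reconstruction stays
unambiguous.

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>

int main(void)
{
	int shift = 64 - 40;	/* 64 - cntval_bits; 40-bit width is an assumption */
	uint64_t prev_raw_count = 0xffffff8000000001ULL;	/* (u64)-left, from the report above */
	uint64_t new_raw_count  = 0x1804ULL;	/* counter wrapped past 2^40 back to a small value */
	int64_t sdelta;
	uint64_t udelta;

	/*
	 * Old code: s64 delta.  The right shift of a negative value is
	 * implementation-defined in C, but arithmetic (sign-extending)
	 * on gcc/x86, so the wrapped difference comes back negative.
	 */
	sdelta = (new_raw_count << shift) - (prev_raw_count << shift);
	sdelta >>= shift;
	printf("s64 delta: 0x%016" PRIx64 "\n", (uint64_t)sdelta);

	/*
	 * Patched code: u64 delta.  The logical shift yields the count
	 * modulo 2^40, correct since the counter cannot decrement.
	 */
	udelta = (new_raw_count << shift) - (prev_raw_count << shift);
	udelta >>= shift;
	printf("u64 delta: 0x%016" PRIx64 "\n", udelta);

	return 0;
}

Built with gcc, this should print 0xffffff8000001803 for the s64 case
(the exact bogus delta from Kan's report) and 0x0000008000001803 for
the u64 case.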