The csky PMU counters may have different I/O widths. When a counter is narrower than 64 bits and the sampled value is smaller than the previously read value, the delta calculation produces an extremely large value. To avoid this, the sampled value is sign-extended to 64 bits; the number of extension bits is derived from the reg-io-width property in the device tree.
Signed-off-by: Mao Han <han_...@c-sky.com> CC: Guo Ren <guo...@kernel.org> CC: linux-c...@vger.kernel.org --- arch/csky/kernel/perf_event.c | 16 +++++++++++++++- 1 file changed, 15 insertions(+), 1 deletion(-) diff --git a/arch/csky/kernel/perf_event.c b/arch/csky/kernel/perf_event.c index c022acc..f1b3cdf 100644 --- a/arch/csky/kernel/perf_event.c +++ b/arch/csky/kernel/perf_event.c @@ -18,6 +18,7 @@ static void (*hw_raw_write_mapping[CSKY_PMU_MAX_EVENTS])(uint64_t val); struct csky_pmu_t { struct pmu pmu; + uint32_t sign_extend; uint32_t hpcr; } csky_pmu; @@ -806,7 +807,13 @@ static void csky_perf_event_update(struct perf_event *event, struct hw_perf_event *hwc) { uint64_t prev_raw_count = local64_read(&hwc->prev_count); - uint64_t new_raw_count = hw_raw_read_mapping[hwc->idx](); + /* + * Extend count value to 64bit, otherwise delta calculation would + * be incorrect when overflow occurs. + */ + uint64_t new_raw_count = ((int64_t)hw_raw_read_mapping[hwc->idx]() + << csky_pmu.sign_extend) + >> csky_pmu.sign_extend; int64_t delta = new_raw_count - prev_raw_count; /* @@ -1037,6 +1044,7 @@ int csky_pmu_device_probe(struct platform_device *pdev, const struct of_device_id *of_id; csky_pmu_init init_fn; struct device_node *node = pdev->dev.of_node; + int cnt_width; int ret = -ENODEV; of_id = of_match_node(of_table, pdev->dev.of_node); @@ -1045,6 +1053,12 @@ int csky_pmu_device_probe(struct platform_device *pdev, ret = init_fn(&csky_pmu); } + if (!of_property_read_u32(node, "reg-io-width", &cnt_width)) { + csky_pmu.sign_extend = 64 - cnt_width; + } else { + csky_pmu.sign_extend = 16; + } + if (ret) { pr_notice("[perf] failed to probe PMU!\n"); return ret; -- 2.7.4