On Tue, May 28, 2019 at 02:20:53PM -0400, Liang, Kan wrote:
> On 5/28/2019 8:05 AM, Peter Zijlstra wrote:
> > On Tue, May 21, 2019 at 02:40:48PM -0700, kan.li...@linux.intel.com wrote:

> @@ -2155,9 +2155,19 @@ static void intel_pmu_disable_event(struct perf_event *event)
>               return;
>       }
>  
> -     cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->idx);
> -     cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->idx);
> -     cpuc->intel_cp_status &= ~(1ull << hwc->idx);
> +     __clear_bit(hwc->idx, cpuc->enabled_events);
> +
> +     /*
> +      * When any other slots-sharing event is still enabled,
> +      * cancel the disabling.
> +      */
> +     if (is_any_slots_idx(hwc->idx) &&
> +         (*(u64 *)&cpuc->enabled_events & INTEL_PMC_MSK_ANY_SLOTS))
> +             return;
> +
> +     cpuc->intel_ctrl_guest_mask &= ~(1ull << hwc->reg_idx);
> +     cpuc->intel_ctrl_host_mask &= ~(1ull << hwc->reg_idx);
> +     cpuc->intel_cp_status &= ~(1ull << hwc->reg_idx);
>  
>       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
>               intel_pmu_disable_fixed(hwc);

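So if I follow, the disable side is a last-one-out scheme: the clear only
takes effect once no slots-sharing event is left enabled. As a minimal
userspace model (SLOTS_MASK and all names here are made-up stand-ins for
INTEL_PMC_MSK_ANY_SLOTS and friends, not the kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Pretend bits 4-7 share one hardware counter. */
#define SLOTS_MASK	0xf0ull

static uint64_t enabled_events;		/* models cpuc->enabled_events */

static bool is_slots_idx(int idx)
{
	return (1ull << idx) & SLOTS_MASK;
}

/* Returns true when the hardware counter would really be stopped. */
static bool disable_event(int idx)
{
	enabled_events &= ~(1ull << idx);

	/* Another slots-sharing event is still enabled: cancel the disable. */
	if (is_slots_idx(idx) && (enabled_events & SLOTS_MASK))
		return false;

	/* ... the real code clears the guest/host/cp masks here ... */
	return true;
}

int main(void)
{
	enabled_events = (1ull << 4) | (1ull << 5);
	printf("%d\n", disable_event(4));	/* 0: sibling still enabled */
	printf("%d\n", disable_event(5));	/* 1: last user, really stop */
	return 0;
}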
> @@ -2242,18 +2252,19 @@ static void intel_pmu_enable_event(struct perf_event *event)
>       }
>  
>       if (event->attr.exclude_host)
> -             cpuc->intel_ctrl_guest_mask |= (1ull << hwc->idx);
> +             cpuc->intel_ctrl_guest_mask |= (1ull << hwc->reg_idx);
>       if (event->attr.exclude_guest)
> -             cpuc->intel_ctrl_host_mask |= (1ull << hwc->idx);
> +             cpuc->intel_ctrl_host_mask |= (1ull << hwc->reg_idx);
>  
>       if (unlikely(event_is_checkpointed(event)))
> -             cpuc->intel_cp_status |= (1ull << hwc->idx);
> +             cpuc->intel_cp_status |= (1ull << hwc->reg_idx);
>  
>       if (unlikely(event->attr.precise_ip))
>               intel_pmu_pebs_enable(event);
>  
>       if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
> -             intel_pmu_enable_fixed(event);
> +             if (!__test_and_set_bit(hwc->idx, cpuc->enabled_events))
> +                     intel_pmu_enable_fixed(event);
>               return;
>       }
>  
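And on the enable side, the __test_and_set_bit() means only the first
enable of a given idx actually programs the counter. Same simplified
model (again, all names here are illustrative, not the kernel's bitops):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t enabled_events;		/* models cpuc->enabled_events */

/* Models __test_and_set_bit(): set the bit, return its old value. */
static bool test_and_set(uint64_t *map, int idx)
{
	bool old = *map & (1ull << idx);

	*map |= 1ull << idx;
	return old;
}

static void enable_fixed(int idx)
{
	/* The real code programs MSR_ARCH_PERFMON_FIXED_CTR_CTRL here. */
	printf("programming counter for idx %d\n", idx);
}

static void enable_event(int idx)
{
	/* Only the first enable of this idx touches the hardware. */
	if (!test_and_set(&enabled_events, idx))
		enable_fixed(idx);
}

int main(void)
{
	enable_event(4);	/* programs the counter */
	enable_event(4);	/* bit already set: hardware untouched */
	return 0;
}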
> diff --git a/arch/x86/events/perf_event.h b/arch/x86/events/perf_event.h
> index 7ae2912f16de..dd6c86a758f7 100644
> --- a/arch/x86/events/perf_event.h
> +++ b/arch/x86/events/perf_event.h
> @@ -203,6 +203,7 @@ struct cpu_hw_events {
>       unsigned long           active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
>       unsigned long           running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
>       int                     enabled;
> +     unsigned long           enabled_events[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
>  
>       int                     n_events; /* the # of events in the below arrays */
>       int                     n_added;  /* the # last events in the below arrays;

> > Also, why do we need that whole enabled_events[] array. Do we really not
> > have that information elsewhere?
> 
> No. So far we haven't had a case where several events share a counter at
> the same time, so we never needed to check whether other events are still
> enabled when disabling a counter, and we didn't save that information.
> But we have to do it for the metrics events.

So you have x86_pmu.disable() clear the bit, and x86_pmu.enable() set
the bit, and then, if you look at arch/x86/events/core.c, that doesn't
look redundant?

That is, explain to me how exactly this new enabled_events[] is different
from active_mask[].
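For reference, x86_pmu_start() and x86_pmu_stop() already bracket
->enable() and ->disable() with exactly this kind of per-idx bookkeeping;
roughly (paraphrased, details elided; see arch/x86/events/core.c for the
real thing):

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	/* ... */
	cpuc->events[hwc->idx] = event;
	__set_bit(hwc->idx, cpuc->active_mask);
	x86_pmu.enable(event);
	/* ... */
}

static void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (test_bit(hwc->idx, cpuc->active_mask)) {
		x86_pmu.disable(event);
		__clear_bit(hwc->idx, cpuc->active_mask);
		/* ... */
	}
}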
