On Mon, Mar 29, 2021 at 01:41:27PM +0800, Like Xu wrote:

> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
> index 827886c12c16..3509b18478b9 100644
> --- a/arch/x86/kvm/pmu.c
> +++ b/arch/x86/kvm/pmu.c
> @@ -74,11 +74,20 @@ static void kvm_perf_overflow_intr(struct perf_event *perf_event,
>  {
>       struct kvm_pmc *pmc = perf_event->overflow_handler_context;
>       struct kvm_pmu *pmu = pmc_to_pmu(pmc);
> +     bool skip_pmi = false;
>  
>       if (!test_and_set_bit(pmc->idx, pmu->reprogram_pmi)) {
> -             __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
> +             if (perf_event->attr.precise_ip) {
> +                     /* Indicate PEBS overflow PMI to guest. */
> +                     skip_pmi = test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
> +                             (unsigned long *)&pmu->global_status);

Is there actual concurrency here, or did you forget to type __?
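
If there is no concurrent writer to global_status, the non-atomic variant
hinted at above would look something like this (just a sketch, not a
statement about the final patch):

	/* non-atomic RMW: only safe if nothing else can touch
	 * pmu->global_status concurrently */
	skip_pmi = __test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
				      (unsigned long *)&pmu->global_status);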

And in case you're using vim, use something like: set cino=(0:0
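
The (0 part makes continuation lines inside an open paren line up under the
first argument, e.g. (indentation only, not the final code):

	skip_pmi = test_and_set_bit(GLOBAL_STATUS_BUFFER_OVF_BIT,
				    (unsigned long *)&pmu->global_status);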

> +             } else
> +                     __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
>               kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
>  
> +             if (skip_pmi)
> +                     return;
> +
>               /*
>                * Inject PMI. If vcpu was in a guest mode during NMI PMI
>                * can be ejected on a guest mode re-entry. Otherwise we can't
