On Thu, Jan 7, 2016 at 6:12 AM, Jan Beulich <jbeul...@suse.com> wrote:

> >>> On 05.01.16 at 02:43, <bgr...@netflix.com> wrote:
> > This introduces a way to have a restricted VPMU, by specifying one of two
> > predefined groups of PMCs to make available. For secure environments,
> this
> > allows the VPMU to be used without needing to enable all PMCs.
> >
> > Signed-off-by: Brendan Gregg <bgr...@netflix.com>
> > Reviewed-by: Boris Ostrovsky <boris.ostrov...@oracle.com>
>
> I was about to apply with
>
> >  static DEFINE_PER_CPU(struct vcpu *, last_vcpu);
> >
> > -static void __init parse_vpmu_param(char *s)
> > +static int parse_vpmu_param(char *s, int len)
>
> static bool_t __init parse_vpmu_param(char *s, unsigned int len)
>

Ok.


>
> >  {
> > +    if ( ! *s || ! len )
>
>     if ( !*s || !len )
>
>
Ok.


> > +        return 0;
> > +    if ( !strncmp(s, "bts", len) )
> > +        vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
> > +    else if ( !strncmp(s, "ipc", len) )
> > +        vpmu_features |= XENPMU_FEATURE_IPC_ONLY;
> > +    else if ( !strncmp(s, "arch", len) )
> > +        vpmu_features |= XENPMU_FEATURE_ARCH_ONLY;
> > +    else
> > +        return 1;
> > +    return 0;
> > +}
> > +
> > +static void __init parse_vpmu_params(char *s)
> > +{
> > +    char *sep, *p = s;
> > +
> >      switch ( parse_bool(s) )
> >      {
> >      case 0:
> >          break;
> >      default:
> > -        if ( !strcmp(s, "bts") )
> > -            vpmu_features |= XENPMU_FEATURE_INTEL_BTS;
> > -        else if ( *s )
> > +        while (1)
> >          {
> > -            printk("VPMU: unknown flag: %s - vpmu disabled!\n", s);
> > -            break;
> > +            sep = strchr(p, ',');
> > +            if ( sep == NULL )
> > +                sep = strchr(p, 0);
> > +            if ( parse_vpmu_param(p, sep - p) )
> > +                goto error;
> > +            if ( *sep == 0 )
>
>             if ( !*sep )
>
>
Ok.
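For reference, here's a rough standalone sketch of how the revised split-and-match logic behaves with those fixes folded in. The FEAT_* values, the parse_token/parse_list names and the main() harness below are stand-ins of my own for illustration only; the real code uses bool_t, __init and the pmu.h feature flags:

    /*
     * Standalone illustration only (any C99 compiler). A non-zero (true)
     * return from parse_token means "unknown flag", matching the patch's
     * return convention.
     */
    #include <stdbool.h>
    #include <stdio.h>
    #include <string.h>

    #define FEAT_BTS  (1u << 0)
    #define FEAT_IPC  (1u << 1)
    #define FEAT_ARCH (1u << 2)

    static unsigned int features;

    static bool parse_token(const char *s, unsigned int len)
    {
        if ( !*s || !len )
            return false;                   /* empty token: ignored */
        if ( !strncmp(s, "bts", len) )
            features |= FEAT_BTS;
        else if ( !strncmp(s, "ipc", len) )
            features |= FEAT_IPC;
        else if ( !strncmp(s, "arch", len) )
            features |= FEAT_ARCH;
        else
            return true;                    /* unknown flag */
        return false;
    }

    static bool parse_list(const char *s)
    {
        const char *sep, *p = s;

        for ( ; ; )
        {
            sep = strchr(p, ',');
            if ( sep == NULL )
                sep = strchr(p, '\0');      /* points at the trailing NUL */
            if ( parse_token(p, sep - p) )
                return false;               /* caller would disable vpmu */
            if ( !*sep )
                break;
            p = sep + 1;
        }
        return true;
    }

    int main(void)
    {
        if ( parse_list("ipc,bts") )
            printf("features = %#x\n", features);   /* prints 0x3 */
        return 0;
    }

With the real handler that corresponds to booting Xen with something like vpmu=ipc,bts (assuming the option keeps its existing vpmu= name).
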


> > --- a/xen/arch/x86/cpu/vpmu_intel.c
> > +++ b/xen/arch/x86/cpu/vpmu_intel.c
> > @@ -604,12 +604,17 @@ static int core2_vpmu_do_wrmsr(unsigned int msr,
> > uint64_t msr_content,
> >                   "MSR_PERF_GLOBAL_STATUS(0x38E)!\n");
> >          return -EINVAL;
> >      case MSR_IA32_PEBS_ENABLE:
> > +        if ( vpmu_features & (XENPMU_FEATURE_IPC_ONLY |
> > +             XENPMU_FEATURE_ARCH_ONLY) )
>
>                               XENPMU_FEATURE_ARCH_ONLY) )
>
>
Ok, yes, neater.


> > @@ -656,12 +661,55 @@ static int core2_vpmu_do_wrmsr(unsigned int msr,
> uint64_t msr_content,
> >          tmp = msr - MSR_P6_EVNTSEL(0);
> >          if ( tmp >= 0 && tmp < arch_pmc_cnt )
> >          {
> > +            bool_t blocked = 0;
> > +            uint64_t umaskevent;
> >              struct xen_pmu_cntr_pair *xen_pmu_cntr_pair =
> >                  vpmu_reg_pointer(core2_vpmu_cxt, arch_counters);
> >
> >              if ( msr_content & ARCH_CTRL_MASK )
> >                  return -EINVAL;
> >
> > +            /* PMC filters */
> > +            umaskevent = msr_content & MSR_IA32_CMT_EVTSEL_UE_MASK;
> > +            if ( vpmu_features & XENPMU_FEATURE_IPC_ONLY ||
> > +                 vpmu_features & XENPMU_FEATURE_ARCH_ONLY )
>
>             if ( vpmu_features & (XENPMU_FEATURE_IPC_ONLY |
>                                   XENPMU_FEATURE_ARCH_ONLY) )
>
>
Ok.


> > +            {
> > +                blocked = 1;
> > +                switch ( umaskevent )
> > +                {
> > +                /*
> > +                 * See the Pre-Defined Architectural Performance Events
> table
> > +                 * from the Intel 64 and IA-32 Architectures Software
> > +                 * Developer's Manual, Volume 3B, System Programming
> Guide,
> > +                 * Part 2.
> > +                 */
> > +                case 0x003c: /* UnHalted Core Cycles */
> > +                case 0x013c: /* UnHalted Reference Cycles */
> > +                case 0x00c0: /* Instructions Retired */
> > +                    blocked = 0;
> > +                default:
> > +                    break;
>
> dropped last two lines
>
>
Ok.


> > +                }
> > +            }
> > +
> > +            if ( vpmu_features & XENPMU_FEATURE_ARCH_ONLY )
> > +            {
> > +                /* additional counters beyond IPC only; blocked already
> set */
>
>                 /* Additional counters beyond IPC only; blocked already
> set. */
>
> > +                switch ( umaskevent )
> > +                {
> > +                case 0x4f2e: /* Last Level Cache References */
> > +                case 0x412e: /* Last Level Cache Misses */
> > +                case 0x00c4: /* Branch Instructions Retired */
> > +                case 0x00c5: /* All Branch Mispredict Retired */
> > +                    blocked = 0;
> > +                default:
> > +                    break;
>
> Again
>

Ok.
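And for clarity, here's how the two whitelists compose once those default labels are dropped, again as a standalone sketch. The FEAT_* values and the 0xffff umask+event mask are stand-ins for the Xen definitions; the event codes are the ones quoted above:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define FEAT_IPC  (1u << 1)
    #define FEAT_ARCH (1u << 2)

    /* Returns true if the event-select write should be blocked. */
    static bool event_blocked(unsigned int features, uint64_t msr_content)
    {
        uint64_t umaskevent = msr_content & 0xffff; /* umask[15:8] | event[7:0] */
        bool blocked = false;

        if ( features & (FEAT_IPC | FEAT_ARCH) )
        {
            blocked = true;
            switch ( umaskevent )
            {
            case 0x003c: /* UnHalted Core Cycles */
            case 0x013c: /* UnHalted Reference Cycles */
            case 0x00c0: /* Instructions Retired */
                blocked = false;
            }
        }

        if ( features & FEAT_ARCH )
        {
            /* Additional counters beyond IPC only; blocked already set. */
            switch ( umaskevent )
            {
            case 0x4f2e: /* Last Level Cache References */
            case 0x412e: /* Last Level Cache Misses */
            case 0x00c4: /* Branch Instructions Retired */
            case 0x00c5: /* All Branch Mispredict Retired */
                blocked = false;
            }
        }

        return blocked;
    }

    int main(void)
    {
        printf("%d\n", event_blocked(FEAT_IPC, 0x0000c0)); /* 0: allowed */
        printf("%d\n", event_blocked(FEAT_IPC, 0x004f2e)); /* 1: blocked (needs arch) */
        return 0;
    }

Either way the semantics are unchanged; dropping the default labels is purely cosmetic.
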


>
> > --- a/xen/include/public/pmu.h
> > +++ b/xen/include/public/pmu.h
> > @@ -84,9 +84,19 @@ DEFINE_XEN_GUEST_HANDLE(xen_pmu_params_t);
> >
> >  /*
> >   * PMU features:
> > - * - XENPMU_FEATURE_INTEL_BTS: Intel BTS support (ignored on AMD)
> > + * - XENPMU_FEATURE_INTEL_BTS:  Intel BTS support (ignored on AMD)
> > + * - XENPMU_FEATURE_IPC_ONLY:   Restrict PMCs to the most minimum set
> possible.
> > + *                              Instructions, cycles, and ref cycles.
> Can be
> > + *                              used to calculate
> instructions-per-cycle (IPC)
> > + *                              (ignored on AMD).
> > + * - XENPMU_FEATURE_ARCH_ONLY:  Restrict PMCs to the Intel Pre-Defined
> > + *                              Architectural Performance Events
> exposed by
> > + *                              cpuid and listed in the Intel
> developer's manual
> > + *                              (ignored on AMD).
> >   */
> > -#define XENPMU_FEATURE_INTEL_BTS  1
> > +#define XENPMU_FEATURE_INTEL_BTS  (1<<0)
> > +#define XENPMU_FEATURE_IPC_ONLY   (1<<1)
> > +#define XENPMU_FEATURE_ARCH_ONLY  (1<<2)
>
> when I reached this: Do these additions really need to go here?
> And if they do, why does do_xenpmu_op()'s XENPMU_feature_set
> case not get altered?
>

I'd say they do: it's pmu.h, and it puts them alongside the existing BTS feature, the other PMU modes, and so on. Unless I'm missing some consideration.

Yes, I missed the new sysfs PMU interface, so do_xenpmu_op() should do:

    case XENPMU_feature_set:
        if ( pmu_params.val & ~(XENPMU_FEATURE_INTEL_BTS |
                                XENPMU_FEATURE_IPC_ONLY |
                                XENPMU_FEATURE_ARCH_ONLY) )
            return -EINVAL;
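
Just to sanity-check the mask arithmetic with the new bit values from the pmu.h hunk (BTS = 1, IPC = 2, ARCH = 4, so the allowed mask is 0x7):

    #include <stdio.h>

    #define XENPMU_FEATURE_INTEL_BTS  (1<<0)
    #define XENPMU_FEATURE_IPC_ONLY   (1<<1)
    #define XENPMU_FEATURE_ARCH_ONLY  (1<<2)

    int main(void)
    {
        unsigned int allowed = XENPMU_FEATURE_INTEL_BTS |
                               XENPMU_FEATURE_IPC_ONLY |
                               XENPMU_FEATURE_ARCH_ONLY;         /* 0x7 */

        /* val = ipc|arch (0x6) passes; val = 0x8 carries an unknown bit. */
        printf("%d %d\n", !(0x6 & ~allowed), !(0x8 & ~allowed)); /* prints "1 0" */
        return 0;
    }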

I can send along a v6 patch with the other changes too if that's desirable.
Thanks, and sorry for the nuisance.

Brendan

-- 
Brendan Gregg, Senior Performance Architect, Netflix