On 06/19/2015 06:30 AM, Paolo Bonzini wrote:
> 
> 
> On 12/06/2015 07:34, Wei Huang wrote:
>> This patch defines a new function pointer struct (kvm_pmu_ops) to
>> support vPMU for both Intel and AMD. The function pointers defined in
>> this new struct will be linked to Intel- and AMD-specific functions later.
>> In the meantime, the struct that maps event_sel bits to PERF_TYPE_HARDWARE
>> events is renamed and moved from Intel-specific code to kvm_host.h as a
>> common struct.
>>
>> Reviewed-by: Joerg Roedel <jroe...@suse.de>
>> Tested-by: Joerg Roedel <jroe...@suse.de>
>> Signed-off-by: Wei Huang <w...@redhat.com>
> 
> The kvm_pmu_ops are unused and would belong in patch 2... the code is
> good, though, and I'm not going to be fussy about it because there's no
> semantic change in this patch anyway.
OK. This should be relatively easy.
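
For reference, a rough sketch of how a vendor-specific ops table could
eventually be hooked up. The intel_* names and the pmu_ops pointer on
kvm_x86_ops below are illustrative assumptions only, not taken from this
patch:

/* Illustrative only: one callback shown, the rest omitted. */
static unsigned intel_find_arch_event(struct kvm_pmu *pmu,
				      u8 event_select, u8 unit_mask)
{
	/* would walk intel_arch_events[] just like find_arch_event() does */
	return PERF_COUNT_HW_MAX;
}

static struct kvm_pmu_ops intel_pmu_ops = {
	.find_arch_event = intel_find_arch_event,
	/* remaining callbacks filled in the same way */
};

/*
 * Generic PMU code would then dispatch through the ops pointer,
 * e.g. assuming kvm_x86_ops grows a pmu_ops member in a later patch:
 */
unsigned kvm_pmu_find_arch_event(struct kvm_vcpu *vcpu,
				 u8 event_select, u8 unit_mask)
{
	return kvm_x86_ops->pmu_ops->find_arch_event(&vcpu->arch.pmu,
						     event_select, unit_mask);
}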

-Wei
> 
> Paolo
> 
>> ---
>>  arch/x86/include/asm/kvm_host.h | 34 ++++++++++++++++++++++++++++------
>>  arch/x86/kvm/pmu.c              | 21 ++++++++-------------
>>  2 files changed, 36 insertions(+), 19 deletions(-)
>>
>> diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
>> index 8ca32cf..82dc7cc 100644
>> --- a/arch/x86/include/asm/kvm_host.h
>> +++ b/arch/x86/include/asm/kvm_host.h
>> @@ -304,6 +304,12 @@ struct kvm_mmu {
>>      u64 pdptrs[4]; /* pae */
>>  };
>>  
>> +struct msr_data {
>> +    bool host_initiated;
>> +    u32 index;
>> +    u64 data;
>> +};
>> +
>>  enum pmc_type {
>>      KVM_PMC_GP = 0,
>>      KVM_PMC_FIXED,
>> @@ -318,6 +324,12 @@ struct kvm_pmc {
>>      struct kvm_vcpu *vcpu;
>>  };
>>  
>> +struct kvm_event_hw_type_mapping {
>> +    u8 eventsel;
>> +    u8 unit_mask;
>> +    unsigned event_type;
>> +};
>> +
>>  struct kvm_pmu {
>>      unsigned nr_arch_gp_counters;
>>      unsigned nr_arch_fixed_counters;
>> @@ -336,6 +348,22 @@ struct kvm_pmu {
>>      u64 reprogram_pmi;
>>  };
>>  
>> +struct kvm_pmu_ops {
>> +    unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
>> +                                u8 unit_mask);
>> +    unsigned (*find_fixed_event)(int idx);
>> +    bool (*pmc_enabled)(struct kvm_pmc *pmc);
>> +    struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
>> +    struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx);
>> +    int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
>> +    bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
>> +    int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
>> +    int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
>> +    void (*refresh)(struct kvm_vcpu *vcpu);
>> +    void (*init)(struct kvm_vcpu *vcpu);
>> +    void (*reset)(struct kvm_vcpu *vcpu);
>> +};
>> +
>>  enum {
>>      KVM_DEBUGREG_BP_ENABLED = 1,
>>      KVM_DEBUGREG_WONT_EXIT = 2,
>> @@ -683,12 +711,6 @@ struct kvm_vcpu_stat {
>>  
>>  struct x86_instruction_info;
>>  
>> -struct msr_data {
>> -    bool host_initiated;
>> -    u32 index;
>> -    u64 data;
>> -};
>> -
>>  struct kvm_lapic_irq {
>>      u32 vector;
>>      u16 delivery_mode;
>> diff --git a/arch/x86/kvm/pmu.c b/arch/x86/kvm/pmu.c
>> index 29fbf9d..3af3404 100644
>> --- a/arch/x86/kvm/pmu.c
>> +++ b/arch/x86/kvm/pmu.c
>> @@ -20,12 +20,7 @@
>>  #include "cpuid.h"
>>  #include "lapic.h"
>>  
>> -static struct kvm_arch_event_perf_mapping {
>> -    u8 eventsel;
>> -    u8 unit_mask;
>> -    unsigned event_type;
>> -    bool inexact;
>> -} arch_events[] = {
>> +static struct kvm_event_hw_type_mapping intel_arch_events[] = {
>>      /* Index must match CPUID 0x0A.EBX bit vector */
>>      [0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
>>      [1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
>> @@ -37,7 +32,7 @@ static struct kvm_arch_event_perf_mapping {
>>      [7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
>>  };
>>  
>> -/* mapping between fixed pmc index and arch_events array */
>> +/* mapping between fixed pmc index and intel_arch_events array */
>>  static int fixed_pmc_events[] = {1, 0, 7};
>>  
>>  static bool pmc_is_gp(struct kvm_pmc *pmc)
>> @@ -202,16 +197,16 @@ static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
>>  {
>>      int i;
>>  
>> -    for (i = 0; i < ARRAY_SIZE(arch_events); i++)
>> -            if (arch_events[i].eventsel == event_select
>> -                            && arch_events[i].unit_mask == unit_mask
>> +    for (i = 0; i < ARRAY_SIZE(intel_arch_events); i++)
>> +            if (intel_arch_events[i].eventsel == event_select
>> +                            && intel_arch_events[i].unit_mask == unit_mask
>>                              && (pmu->available_event_types & (1 << i)))
>>                      break;
>>  
>> -    if (i == ARRAY_SIZE(arch_events))
>> +    if (i == ARRAY_SIZE(intel_arch_events))
>>              return PERF_COUNT_HW_MAX;
>>  
>> -    return arch_events[i].event_type;
>> +    return intel_arch_events[i].event_type;
>>  }
>>  
>>  static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
>> @@ -265,7 +260,7 @@ static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
>>              return;
>>  
>>      reprogram_counter(pmc, PERF_TYPE_HARDWARE,
>> -                    arch_events[fixed_pmc_events[idx]].event_type,
>> +                    intel_arch_events[fixed_pmc_events[idx]].event_type,
>>                      !(en & 0x2), /* exclude user */
>>                      !(en & 0x1), /* exclude kernel */
>>                      pmi, false, false);
>>