(+EwanHai for zhaoxin case...) ...
> -static void kvm_init_pmu_info(CPUX86State *env) > +static void kvm_init_pmu_info_intel(CPUX86State *env) > { > uint32_t eax, edx; > uint32_t unused; > @@ -2106,6 +2106,94 @@ static void kvm_init_pmu_info(CPUX86State *env) > } > } > > +static void kvm_init_pmu_info_amd(CPUX86State *env) > +{ > + uint32_t unused; > + int64_t family; > + uint32_t ecx; > + > + has_pmu_version = 0; > + > + /* > + * To determine the CPU family, the following code is derived from > + * x86_cpuid_version_get_family(). > + */ > + family = (env->cpuid_version >> 8) & 0xf; > + if (family == 0xf) { > + family += (env->cpuid_version >> 20) & 0xff; > + } > + > + /* > + * Performance-monitoring supported from K7 and later. > + */ > + if (family < 6) { > + return; > + } I understand we can get family by object_property_get_int() helper: diff --git a/target/i386/kvm/kvm.c b/target/i386/kvm/kvm.c index 4902694129f9..ff08c7bfee6c 100644 --- a/target/i386/kvm/kvm.c +++ b/target/i386/kvm/kvm.c @@ -2106,27 +2106,22 @@ static void kvm_init_pmu_info_intel(CPUX86State *env) } } -static void kvm_init_pmu_info_amd(CPUX86State *env) +static void kvm_init_pmu_info_amd(X86CPU *cpu) { + CPUX86State *env = &cpu->env; uint32_t eax, ebx, ecx; uint32_t unused; int64_t family; has_pmu_version = 0; - /* - * To determine the CPU family, the following code is derived from - * x86_cpuid_version_get_family(). - */ - family = (env->cpuid_version >> 8) & 0xf; - if (family == 0xf) { - family += (env->cpuid_version >> 20) & 0xff; + family = object_property_get_int(OBJECT(cpu), "family", &error_abort); + if (family < 0) { + return; } - /* - * Performance-monitoring supported from K7 and later. 
- */ if (family < 6) { + error_report("AMD performance-monitoring is supported from K7 and later"); return; } @@ -2197,7 +2192,7 @@ static void kvm_init_pmu_info(CPUState *cs) if (IS_INTEL_CPU(env)) { kvm_init_pmu_info_intel(env); } else if (IS_AMD_CPU(env)) { - kvm_init_pmu_info_amd(env); + kvm_init_pmu_info_amd(cpu); } } --- Then for consistency, kvm_init_pmu_info_intel() could also accept "X86CPU *cpu" as the argument. > + has_pmu_version = 1; > + > + cpu_x86_cpuid(env, 0x80000001, 0, &unused, &unused, &ecx, &unused); > + > + if (!(ecx & CPUID_EXT3_PERFCORE)) { > + num_pmu_gp_counters = AMD64_NUM_COUNTERS; > + return; > + } > + > + num_pmu_gp_counters = AMD64_NUM_COUNTERS_CORE; > +} ... > +static void kvm_init_pmu_info(CPUState *cs) > +{ > + X86CPU *cpu = X86_CPU(cs); > + CPUX86State *env = &cpu->env; > + > + /* > + * The PMU virtualization is disabled by kvm.enable_pmu=N. > + */ > + if (kvm_pmu_disabled) { > + return; > + } As I said in patch 7, we could return an error instead. > + /* > + * It is not supported to virtualize AMD PMU registers on Intel > + * processors, nor to virtualize Intel PMU registers on AMD processors. > + */ > + if (!is_same_vendor(env)) { Here it deserves a warning like: error_report("host doesn't support requested feature: vPMU"); (note: error_report() appends the newline itself, so no trailing "\n" in the message) > + return; > + } > > + /* > + * If KVM_CAP_PMU_CAPABILITY is not supported, there is no way to > + * disable the AMD pmu virtualization. > + * > + * If KVM_CAP_PMU_CAPABILITY is supported !cpu->enable_pmu > + * indicates the KVM has already disabled the PMU virtualization. > + */ > + if (has_pmu_cap && !cpu->enable_pmu) { > + return; > + } Could we only check "cpu->enable_pmu" at the beginning of this function? Then if the PMU is already disabled, we don't need to initialize the pmu info. > + if (IS_INTEL_CPU(env)) { Zhaoxin also supports architectural PerfMon in 0xa. I'm not sure if this check should also involve Zhaoxin CPU, so cc zhaoxin guys for double check. 
> + kvm_init_pmu_info_intel(env); > + } else if (IS_AMD_CPU(env)) { > + kvm_init_pmu_info_amd(env); > + } > +} > + > int kvm_arch_init_vcpu(CPUState *cs) > { > struct { > @@ -2288,7 +2376,7 @@ int kvm_arch_init_vcpu(CPUState *cs) > cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i); > cpuid_data.cpuid.nent = cpuid_i; > > - kvm_init_pmu_info(env); > + kvm_init_pmu_info(cs); > > if (((env->cpuid_version >> 8)&0xF) >= 6 > && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) == > @@ -4064,7 +4152,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level) > kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, > env->poll_control_msr); > } > > - if (has_pmu_version > 0) { > + if (IS_INTEL_CPU(env) && has_pmu_version > 0) { ditto. > if (has_pmu_version > 1) { > /* Stop the counter. */ > kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); > @@ -4095,6 +4183,38 @@ static int kvm_put_msrs(X86CPU *cpu, int level) > env->msr_global_ctrl); > } > } > + ... > /* > * Hyper-V partition-wide MSRs: to avoid clearing them on cpu > hot-add, > * only sync them to KVM on the first cpu > @@ -4542,7 +4662,8 @@ static int kvm_get_msrs(X86CPU *cpu) > if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) { > kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1); > } > - if (has_pmu_version > 0) { > + > + if (IS_INTEL_CPU(env) && has_pmu_version > 0) { ditto. > if (has_pmu_version > 1) { > kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0); > kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0); > @@ -4558,6 +4679,35 @@ static int kvm_get_msrs(X86CPU *cpu) > } > } > Thanks, Zhao