Hi Zhao,
Thank you for pointing out the potential impact on Zhaoxin CPUs!
Hi Dongli,
Zhaoxin's PMU (for both vendor IDs, "  Shanghai  " and "CentaurHauls")
is compatible with Intel's, so I have some suggestions for this patch.
On 2025/3/3 06:00, Dongli Zhang wrote:
[snip]
+
+static bool is_same_vendor(CPUX86State *env)
+{
+    static uint32_t host_cpuid_vendor1;
+    static uint32_t host_cpuid_vendor2;
+    static uint32_t host_cpuid_vendor3;
+
+    host_cpuid(0x0, 0, NULL, &host_cpuid_vendor1, &host_cpuid_vendor3,
+               &host_cpuid_vendor2);
+
+    return env->cpuid_vendor1 == host_cpuid_vendor1 &&
+           env->cpuid_vendor2 == host_cpuid_vendor2 &&
+           env->cpuid_vendor3 == host_cpuid_vendor3;
+}
Should we consider a special case, such as emulating an Intel CPU on a
Zhaoxin platform, or vice versa? If so, maybe we can write a
'vendor_compatible()' helper instead. After all, before this patchset,
QEMU supported emulating behaviorally compatible CPUs, e.g., an Intel
vCPU on a Zhaoxin pCPU.
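A rough, untested sketch of what I have in mind, reusing host_cpuid()
from the patch and assuming IS_INTEL_CPU()/IS_ZHAOXIN_CPU() only inspect
the cpuid_vendor* fields (so they can be applied to a scratch env that
holds the host's vendor):

static bool vendor_compatible(CPUX86State *env)
{
    static CPUX86State host_env;  /* only the vendor fields are used */

    host_cpuid(0x0, 0, NULL, &host_env.cpuid_vendor1,
               &host_env.cpuid_vendor3, &host_env.cpuid_vendor2);

    /* An exact vendor match is always compatible. */
    if (env->cpuid_vendor1 == host_env.cpuid_vendor1 &&
        env->cpuid_vendor2 == host_env.cpuid_vendor2 &&
        env->cpuid_vendor3 == host_env.cpuid_vendor3) {
        return true;
    }

    /*
     * The Intel and Zhaoxin PMU architectures are compatible, so allow
     * cross-vendor PMU virtualization between them in either direction.
     */
    return (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env)) &&
           (IS_INTEL_CPU(&host_env) || IS_ZHAOXIN_CPU(&host_env));
}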
+static void kvm_init_pmu_info(CPUState *cs)
+{
+    X86CPU *cpu = X86_CPU(cs);
+    CPUX86State *env = &cpu->env;
+
+    /*
+     * The PMU virtualization is disabled by kvm.enable_pmu=N.
+     */
+    if (kvm_pmu_disabled) {
+        return;
+    }
+
+    /*
+     * It is not supported to virtualize AMD PMU registers on Intel
+     * processors, nor to virtualize Intel PMU registers on AMD processors.
+     */
+    if (!is_same_vendor(env)) {
+        return;
+    }
Ditto (the same vendor_compatible() consideration applies here).
[snip]
+    /*
+     * If KVM_CAP_PMU_CAPABILITY is not supported, there is no way to
+     * disable the AMD PMU virtualization.
+     *
+     * If KVM_CAP_PMU_CAPABILITY is supported, !cpu->enable_pmu
+     * indicates that KVM has already disabled the PMU virtualization.
+     */
+    if (has_pmu_cap && !cpu->enable_pmu) {
+        return;
+    }
+
+    if (IS_INTEL_CPU(env)) {
+        kvm_init_pmu_info_intel(env);
We can use "if (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))" instead; this
helper was introduced to QEMU in commit 5d20aa540b (see the adjusted
dispatch after the quoted function below).
The function name kvm_init_pmu_info_intel() is acceptable, since the
current Zhaoxin and Intel PMU architectures are compatible. However, if
Zhaoxin develops any exclusive features in the future, we can always
implement a separate "zhaoxin" version of the PMU info initialization
function.
+    } else if (IS_AMD_CPU(env)) {
+        kvm_init_pmu_info_amd(env);
+    }
+}
+
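With that change, the dispatch above would become something like:

    if (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env)) {
        kvm_init_pmu_info_intel(env);
    } else if (IS_AMD_CPU(env)) {
        kvm_init_pmu_info_amd(env);
    }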
[snip]
int kvm_arch_init_vcpu(CPUState *cs)
{
    struct {
@@ -2288,7 +2376,7 @@ int kvm_arch_init_vcpu(CPUState *cs)
    cpuid_i = kvm_x86_build_cpuid(env, cpuid_data.entries, cpuid_i);
    cpuid_data.cpuid.nent = cpuid_i;
-    kvm_init_pmu_info(env);
+    kvm_init_pmu_info(cs);
    if (((env->cpuid_version >> 8)&0xF) >= 6
        && (env->features[FEAT_1_EDX] & (CPUID_MCE | CPUID_MCA)) ==
@@ -4064,7 +4152,7 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
            kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL,
                              env->poll_control_msr);
        }
-        if (has_pmu_version > 0) {
+        if (IS_INTEL_CPU(env) && has_pmu_version > 0) {
Also use 'if (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))' instead.
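I.e., keeping the PMU-version check, something like:

-        if (IS_INTEL_CPU(env) && has_pmu_version > 0) {
+        if ((IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env)) &&
+            has_pmu_version > 0) {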
            if (has_pmu_version > 1) {
                /* Stop the counter. */
                kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
@@ -4095,6 +4183,38 @@ static int kvm_put_msrs(X86CPU *cpu, int level)
                              env->msr_global_ctrl);
            }
        }
+
+        if (IS_AMD_CPU(env) && has_pmu_version > 0) {
+            uint32_t sel_base = MSR_K7_EVNTSEL0;
+            uint32_t ctr_base = MSR_K7_PERFCTR0;
...
[snip]
@@ -4542,7 +4662,8 @@ static int kvm_get_msrs(X86CPU *cpu)
    if (env->features[FEAT_KVM] & CPUID_KVM_POLL_CONTROL) {
        kvm_msr_entry_add(cpu, MSR_KVM_POLL_CONTROL, 1);
    }
-    if (has_pmu_version > 0) {
+
+    if (IS_INTEL_CPU(env) && has_pmu_version > 0) {
Also use 'if (IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env))' instead.
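Same shape as in kvm_put_msrs(), e.g.:

-    if (IS_INTEL_CPU(env) && has_pmu_version > 0) {
+    if ((IS_INTEL_CPU(env) || IS_ZHAOXIN_CPU(env)) &&
+        has_pmu_version > 0) {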
        if (has_pmu_version > 1) {
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_FIXED_CTR_CTRL, 0);
            kvm_msr_entry_add(cpu, MSR_CORE_PERF_GLOBAL_CTRL, 0);
@@ -4558,6 +4679,35 @@ static int kvm_get_msrs(X86CPU *cpu)
        }
    }
+    if (IS_AMD_CPU(env) && has_pmu_version > 0) {
+        uint32_t sel_base = MSR_K7_EVNTSEL0;
+        uint32_t ctr_base = MSR_K7_PERFCTR0;
...