On 01/10/2017 01:22, Nick Desaulniers wrote:
> On Wed, Sep 27, 2017 at 02:36:03PM +0200, Paolo Bonzini wrote:
>> On 26/09/2017 19:12, Josh Triplett wrote:
>>> Does this make any other checks redundant and removable?
>>
>> It would make sense to place it in cpu_has_kvm_support instead
> 
> cpu_has_kvm_support() or cpu_has_vmx()?
> 
>> , and the same in svm.c's has_svm.
> 
> I don't follow (but I also don't know what any of these three-letter
> acronyms stand for), does svm depend on vmx or vice-versa?

Neither, one is Intel (VMX), the other is AMD (SVM).

Would this work for you?  And again, is this only with clang?
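
To make the idea concrete before the patch: vmx.c (and likewise svm.c)
already carries an x86_cpu_id table for module autoloading, and
x86_match_cpu() simply walks such a table and returns the matching entry,
or NULL.  Roughly (a sketch from memory, the in-tree definitions may
differ in detail):

        /* needs <asm/cpu_device_id.h> and <linux/module.h> */
        static const struct x86_cpu_id vmx_cpu_id[] = {
                X86_FEATURE_MATCH(X86_FEATURE_VMX),
                {}
        };
        MODULE_DEVICE_TABLE(x86cpu, vmx_cpu_id);

        /* non-NULL iff the boot CPU reports the feature */
        if (x86_match_cpu(vmx_cpu_id))
                ...

So cpu_has_kvm_support() and has_svm() can just test the tables that are
already there, instead of poking CPUID by hand.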

---------- 8< ------------
From: Paolo Bonzini <pbonz...@redhat.com>
Subject: [PATCH] KVM: clean up virtext.h

virtext.h does a lot of complicated checks where it could just use
boot_cpu_has instead.  Remove all the cruft, and stop using virtext.h
in KVM altogether, because KVM can just use x86_match_cpu.

As a side effect, this fixes

arch/x86/kvm/vmx.c:64:32: warning: variable vmx_cpu_id is not needed
and will not be emitted [-Wunneeded-internal-declaration]

Reported-by: Nick Desaulniers <ndesaulni...@google.com>
Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>

diff --git a/arch/x86/include/asm/virtext.h b/arch/x86/include/asm/virtext.h
index 0116b2ee9e64..c7f7d5008d75 100644
--- a/arch/x86/include/asm/virtext.h
+++ b/arch/x86/include/asm/virtext.h
@@ -25,13 +25,6 @@
  * VMX functions:
  */
 
-static inline int cpu_has_vmx(void)
-{
-       unsigned long ecx = cpuid_ecx(1);
-       return test_bit(5, &ecx); /* CPUID.1:ECX.VMX[bit 5] -> VT */
-}
-
-
 /** Disable VMX on the current CPU
  *
  * vmxoff causes a undefined-opcode exception if vmxon was not run
@@ -49,22 +42,13 @@ static inline int cpu_vmx_enabled(void)
        return __read_cr4() & X86_CR4_VMXE;
 }
 
-/** Disable VMX if it is enabled on the current CPU
- *
- * You shouldn't call this if cpu_has_vmx() returns 0.
- */
-static inline void __cpu_emergency_vmxoff(void)
-{
-       if (cpu_vmx_enabled())
-               cpu_vmxoff();
-}
-
 /** Disable VMX if it is supported and enabled on the current CPU
  */
 static inline void cpu_emergency_vmxoff(void)
 {
-       if (cpu_has_vmx())
-               __cpu_emergency_vmxoff();
+       if (boot_cpu_has(X86_FEATURE_VMX) &&
+           cpu_vmx_enabled())
+               cpu_vmxoff();
 }
 
 
@@ -74,36 +58,6 @@ static inline void cpu_emergency_vmxoff(void)
  * SVM functions:
  */
 
-/** Check if the CPU has SVM support
- *
- * You can use the 'msg' arg to get a message describing the problem,
- * if the function returns zero. Simply pass NULL if you are not interested
- * on the messages; gcc should take care of not generating code for
- * the messages on this case.
- */
-static inline int cpu_has_svm(const char **msg)
-{
-       if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
-               if (msg)
-                       *msg = "not amd";
-               return 0;
-       }
-
-       if (boot_cpu_data.extended_cpuid_level < SVM_CPUID_FUNC) {
-               if (msg)
-                       *msg = "can't execute cpuid_8000000a";
-               return 0;
-       }
-
-       if (!boot_cpu_has(X86_FEATURE_SVM)) {
-               if (msg)
-                       *msg = "svm not available";
-               return 0;
-       }
-       return 1;
-}
-
-
 /** Disable SVM on the current CPU
  *
  * You should call this only if cpu_has_svm() returned true.
@@ -117,11 +71,20 @@ static inline void cpu_svm_disable(void)
        wrmsrl(MSR_EFER, efer & ~EFER_SVME);
 }
 
+static inline int cpu_svm_enabled(void)
+{
+       uint64_t efer;
+
+       rdmsrl(MSR_EFER, efer);
+       return !!(efer & EFER_SVME);
+}
+
 /** Makes sure SVM is disabled, if it is supported on the CPU
  */
 static inline void cpu_emergency_svm_disable(void)
 {
-       if (cpu_has_svm(NULL))
+       if (boot_cpu_has(X86_FEATURE_SVM) &&
+           cpu_svm_enabled())
                cpu_svm_disable();
 }
 
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 0e68f0b3cbf7..4ad26be59327 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -736,14 +736,7 @@ static void svm_init_osvw(struct kvm_vcpu *vcpu)
 
 static int has_svm(void)
 {
-       const char *msg;
-
-       if (!cpu_has_svm(&msg)) {
-               printk(KERN_INFO "has_svm: %s\n", msg);
-               return 0;
-       }
-
-       return 1;
+       return x86_match_cpu(svm_cpu_id) != NULL;
 }
 
 static void svm_hardware_disable(void)
@@ -769,10 +762,6 @@ static int svm_hardware_enable(void)
        if (efer & EFER_SVME)
                return -EBUSY;
 
-       if (!has_svm()) {
-               pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
-               return -EINVAL;
-       }
        sd = per_cpu(svm_data, me);
        if (!sd) {
                pr_err("%s: svm_data is NULL on %d\n", __func__, me);
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index a2b804e10c95..9f6bcd9dd3c6 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3468,7 +3468,7 @@ static void vmx_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 
 static __init int cpu_has_kvm_support(void)
 {
-       return cpu_has_vmx();
+       return x86_match_cpu(vmx_cpu_id) != NULL;
 }
 
 static __init int vmx_disabled_by_bios(void)
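
For reference, svm.c has the matching table that has_svm() now tests
against; again a sketch, the in-tree definition may differ in detail:

        static const struct x86_cpu_id svm_cpu_id[] = {
                X86_FEATURE_MATCH(X86_FEATURE_SVM),
                {}
        };
        MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

This is also why the clang warning quoted in the changelog goes away:
with KVM built in, MODULE_DEVICE_TABLE() expands to nothing, so
vmx_cpu_id was declared but never referenced; cpu_has_kvm_support()
now references it directly.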
