VMX and SVM apply the TSC scaling ratio with similar logic, so this
patch generalizes it into a common TSC scaling function, kvm_scale_tsc().
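
For example, with kvm_tsc_scaling_ratio_frac_bits = 32 (the value this
patch sets for SVM), a ratio of (3ULL << 32) | 0x80000000 encodes 3.5,
and kvm_scale_tsc() computes (tsc * ratio) >> 32, so a host TSC of
1000000 scales to 3500000.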

Signed-off-by: Haozhong Zhang <haozhong.zh...@intel.com>
---
 arch/x86/kvm/svm.c       | 48 +++------------------------------
 arch/x86/kvm/x86.c       | 45 +++++++++++++++++++++++++++++++
 include/linux/kvm_host.h |  3 +++
 include/linux/math64.h   | 70 ++++++++++++++++++++++++++++++++++++++++++++++++
 4 files changed, 122 insertions(+), 44 deletions(-)
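
For reviewers, a minimal user-space sketch of the scaling semantics
(illustrative only, not part of the patch). It assumes a toolchain
with unsigned __int128 (e.g. gcc on x86_64); scale_shr() below is a
stand-in for the __int128 variant of mul_u64_u64_shr() added by this
patch, and the ratio encoding mirrors the 32-fractional-bit format
configured for SVM:

#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's mul_u64_u64_shr() (__int128 variant):
 * return the full-precision product (a * mul) >> shift. */
static uint64_t scale_shr(uint64_t a, uint64_t mul, unsigned int shift)
{
	return (uint64_t)(((unsigned __int128)a * mul) >> shift);
}

int main(void)
{
	/* 1.5 in 32-bit fixed point: integer part 1, fraction 0x80000000 */
	uint64_t ratio = (1ULL << 32) | 0x80000000ULL;
	uint64_t tsc = 1000000ULL;

	/* prints 1500000, i.e. tsc scaled by 1.5 */
	printf("%llu\n", (unsigned long long)scale_shr(tsc, ratio, 32));
	return 0;
}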

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 04b58cf..d347170 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -212,7 +212,6 @@ static int nested_svm_intercept(struct vcpu_svm *svm);
 static int nested_svm_vmexit(struct vcpu_svm *svm);
 static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
                                      bool has_error_code, u32 error_code);
-static u64 __scale_tsc(u64 ratio, u64 tsc);
 
 enum {
        VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
@@ -892,21 +891,7 @@ static __init int svm_hardware_setup(void)
                kvm_enable_efer_bits(EFER_FFXSR);
 
        if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
-               u64 max;
-
                kvm_has_tsc_control = true;
-
-               /*
-                * Make sure the user can only configure tsc_khz values that
-                * fit into a signed integer.
-                * A min value is not calculated needed because it will always
-                * be 1 on all machines and a value of 0 is used to disable
-                * tsc-scaling for the vcpu.
-                */
-               max = min(0x7fffffffULL, __scale_tsc(tsc_khz, TSC_RATIO_MAX));
-
-               kvm_max_guest_tsc_khz = max;
-
                kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
                kvm_tsc_scaling_ratio_frac_bits = 32;
        }
@@ -973,31 +958,6 @@ static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
        seg->base = 0;
 }
 
-static u64 __scale_tsc(u64 ratio, u64 tsc)
-{
-       u64 mult, frac, _tsc;
-
-       mult  = ratio >> 32;
-       frac  = ratio & ((1ULL << 32) - 1);
-
-       _tsc  = tsc;
-       _tsc *= mult;
-       _tsc += (tsc >> 32) * frac;
-       _tsc += ((tsc & ((1ULL << 32) - 1)) * frac) >> 32;
-
-       return _tsc;
-}
-
-static u64 svm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
-{
-       u64 _tsc = tsc;
-
-       if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
-               _tsc = __scale_tsc(vcpu->arch.tsc_scaling_ratio, tsc);
-
-       return _tsc;
-}
-
 static void svm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 user_tsc_khz, bool scale)
 {
        u64 ratio;
@@ -1066,7 +1026,7 @@ static void svm_adjust_tsc_offset(struct kvm_vcpu *vcpu, s64 adjustment, bool ho
        if (host) {
                if (vcpu->arch.tsc_scaling_ratio != TSC_RATIO_DEFAULT)
                        WARN_ON(adjustment < 0);
-               adjustment = svm_scale_tsc(vcpu, (u64)adjustment);
+               adjustment = kvm_scale_tsc(vcpu, (u64)adjustment);
        }
 
        svm->vmcb->control.tsc_offset += adjustment;
@@ -1084,7 +1044,7 @@ static u64 svm_compute_tsc_offset(struct kvm_vcpu *vcpu, u64 target_tsc)
 {
        u64 tsc;
 
-       tsc = svm_scale_tsc(vcpu, rdtsc());
+       tsc = kvm_scale_tsc(vcpu, rdtsc());
 
        return target_tsc - tsc;
 }
@@ -3076,7 +3036,7 @@ static u64 svm_read_l1_tsc(struct kvm_vcpu *vcpu, u64 host_tsc)
 {
        struct vmcb *vmcb = get_host_vmcb(to_svm(vcpu));
        return vmcb->control.tsc_offset +
-               svm_scale_tsc(vcpu, host_tsc);
+               kvm_scale_tsc(vcpu, host_tsc);
 }
 
 static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
@@ -3086,7 +3046,7 @@ static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        switch (msr_info->index) {
        case MSR_IA32_TSC: {
                msr_info->data = svm->vmcb->control.tsc_offset +
-                       svm_scale_tsc(vcpu, rdtsc());
+                       kvm_scale_tsc(vcpu, rdtsc());
 
                break;
        }
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8849e8b..29c5781 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -1328,6 +1328,39 @@ static void update_ia32_tsc_adjust_msr(struct kvm_vcpu *vcpu, s64 offset)
        vcpu->arch.ia32_tsc_adjust_msr += offset - curr_offset;
 }
 
+/*
+ * Multiply tsc by a fixed point number represented by ratio.
+ *
+ * The most significant 64-N bits (mult) of ratio represent the
+ * integral part of the fixed point number; the remaining N bits
+ * (frac) represent the fractional part, i.e. ratio represents a fixed
+ * point number (mult + frac * 2^(-N)).
+ *
+ * N.B.: we assume that the fractional part of ratio uses at least 1
+ * bit and fewer than all 64 bits, i.e. 0 < N < 64.
+ *
+ * N equals kvm_tsc_scaling_ratio_frac_bits.
+ */
+static inline u64 __scale_tsc(u64 ratio, u64 tsc)
+{
+       BUG_ON(kvm_tsc_scaling_ratio_frac_bits >= 64 ||
+              kvm_tsc_scaling_ratio_frac_bits == 0);
+       return mul_u64_u64_shr(tsc, ratio, kvm_tsc_scaling_ratio_frac_bits);
+}
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc)
+{
+       u64 _tsc = tsc;
+       u64 ratio = vcpu->arch.tsc_scaling_ratio;
+
+       if (ratio != kvm_default_tsc_scaling_ratio)
+               _tsc = __scale_tsc(ratio, tsc);
+
+       return _tsc;
+}
+EXPORT_SYMBOL_GPL(kvm_scale_tsc);
+
 void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
 {
        struct kvm *kvm = vcpu->kvm;
@@ -7366,6 +7399,18 @@ int kvm_arch_hardware_setup(void)
        if (r != 0)
                return r;
 
+       /*
+        * Make sure the user can only configure tsc_khz values that
+        * fit into a signed integer.
+        * A min value is not needed because it will always
+        * be 1 on all machines.
+        */
+       if (kvm_has_tsc_control) {
+               u64 max = min(0x7fffffffULL,
+                             __scale_tsc(kvm_max_tsc_scaling_ratio, tsc_khz));
+               kvm_max_guest_tsc_khz = max;
+       }
+
        kvm_init_msr_list();
        return 0;
 }
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index eba9cae..3556148 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -1183,4 +1183,7 @@ void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
 int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
 #endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */
+
+u64 kvm_scale_tsc(struct kvm_vcpu *vcpu, u64 tsc);
+
 #endif
diff --git a/include/linux/math64.h b/include/linux/math64.h
index c45c089..78aa80b 100644
--- a/include/linux/math64.h
+++ b/include/linux/math64.h
@@ -142,6 +142,13 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
 
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+       return (u64)(((unsigned __int128)a * mul) >> shift);
+}
+#endif /* mul_u64_u64_shr */
+
 #else
 
 #ifndef mul_u64_u32_shr
@@ -161,6 +168,69 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 mul, unsigned int shift)
 }
 #endif /* mul_u64_u32_shr */
 
+#ifndef mul64
+static inline void mul64(u64 *lo, u64 *hi, u64 a, u64 b)
+{
+       typedef union {
+               u64 ll;
+               struct {
+#ifdef __BIG_ENDIAN
+                       u32 high, low;
+#else
+                       u32 low, high;
+#endif
+               } l;
+       } LL;
+       LL rl, rm, rn, rh, a0, b0;
+       u64 c;
+
+       a0.ll = a;
+       b0.ll = b;
+
+       rl.ll = (u64)a0.l.low * b0.l.low;
+       rm.ll = (u64)a0.l.low * b0.l.high;
+       rn.ll = (u64)a0.l.high * b0.l.low;
+       rh.ll = (u64)a0.l.high * b0.l.high;
+
+       c = (u64)rl.l.high + rm.l.low + rn.l.low;
+       rl.l.high = c;
+       c >>= 32;
+       c = c + rm.l.high + rn.l.high + rh.l.low;
+       rh.l.low = c;
+       rh.l.high += (u32)(c >> 32);
+
+       *lo = rl.ll;
+       *hi = rh.ll;
+}
+#endif /* mul64 */
+
+#ifndef rshift128
+static inline void rshift128(u64 *lo, u64 *hi, unsigned int n)
+{
+       u64 h;
+       if (!n)
+               return;
+       h = *hi >> (n & 63);
+       if (n >= 64) {
+               *hi = 0;
+               *lo = h;
+       } else {
+               *lo = (*lo >> n) | (*hi << (64 - n));
+               *hi = h;
+       }
+}
+#endif /* rshift128 */
+
+#ifndef mul_u64_u64_shr
+static inline u64 mul_u64_u64_shr(u64 a, u64 mul, unsigned int shift)
+{
+       u64 lo, hi;
+       mul64(&lo, &hi, a, mul);
+       rshift128(&lo, &hi, shift);
+       return lo;
+}
+#endif /* mul_u64_u64_shr */
+
 #endif
 
 #endif /* _LINUX_MATH64_H */
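
A note on the generic fallback above (reviewer commentary, not part of
the patch): mul64() is schoolbook multiplication on 32-bit limbs.
Writing a = a_hi * 2^32 + a_lo and b = b_hi * 2^32 + b_lo:

  a * b = a_hi * b_hi * 2^64
        + (a_hi * b_lo + a_lo * b_hi) * 2^32
        + a_lo * b_lo

rl, rm, rn and rh hold the four partial products, and the two carry
steps fold the overlapping middle terms into the high 64 bits.
rshift128() then drops the low 'shift' bits, so mul_u64_u64_shr()
keeps the high bits of the 128-bit product that a plain 64-bit
multiply would truncate.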
-- 
2.4.8
