On 12/06/2015 03:58 PM, Haozhong Zhang wrote:
This patch adds a field tsc_scaling_ratio in struct hvm_vcpu to
record the TSC scaling ratio, and sets it up when tsc_set_info() is
called for a vcpu or when a vcpu is restored or reset.
Signed-off-by: Haozhong Zhang <haozhong.zh...@intel.com>
Reviewed-by: Boris Ostrovsky <boris.ostrov...@oracle.com>
with a few nits below.
---
xen/arch/x86/hvm/hvm.c | 30 ++++++++++++++++++++++++++++++
xen/arch/x86/hvm/svm/svm.c | 6 ++++--
xen/arch/x86/time.c | 13 ++++++++++++-
xen/include/asm-x86/hvm/hvm.h | 5 +++++
xen/include/asm-x86/hvm/svm/svm.h | 3 ---
xen/include/asm-x86/hvm/vcpu.h | 2 ++
xen/include/asm-x86/math64.h | 30 ++++++++++++++++++++++++++++++
7 files changed, 83 insertions(+), 6 deletions(-)
create mode 100644 xen/include/asm-x86/math64.h
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0e63c33..52a0ef8 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -65,6 +65,7 @@
#include <asm/mtrr.h>
#include <asm/apic.h>
#include <asm/vm_event.h>
+#include <asm/math64.h>
#include <public/sched.h>
#include <public/hvm/ioreq.h>
#include <public/version.h>
@@ -301,6 +302,29 @@ int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat)
return 1;
}
+void hvm_setup_tsc_scaling(struct vcpu *v)
+{
+ u64 ratio;
+
+ if ( !hvm_funcs.tsc_scaling_supported )
+ return;
+
+ /*
+ * The multiplication of the first two terms may overflow a 64-bit
+ * integer, so use mul_u64_u32_div() instead to keep precision.
+ */
+ ratio = mul_u64_u32_div(1ULL << hvm_funcs.tsc_scaling_ratio_frac_bits,
+ v->domain->arch.tsc_khz, cpu_khz);
+
+ if ( ratio == 0 || ratio > hvm_funcs.max_tsc_scaling_ratio )
+ return;
+
+ v->arch.hvm_vcpu.tsc_scaling_ratio = ratio;
+
+ if ( hvm_funcs.setup_tsc_scaling )
+ hvm_funcs.setup_tsc_scaling(v);
+}
+
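A quick aside on why the helper is needed here: with, say, the 48 fractional bits the VMX side is expected to report in tsc_scaling_ratio_frac_bits, the plain 64-bit product (1ULL << frac_bits) * tsc_khz already overflows for any realistic guest frequency. A minimal standalone sketch (my own illustration, not part of the patch; the 48-bit width and the frequencies are just example assumptions), built with gcc on x86-64:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uint64_t one = 1ULL << 48;     /* assumed 48 fractional bits */
        uint32_t gtsc_khz = 2000000;   /* 2 GHz guest */
        uint32_t htsc_khz = 2500000;   /* 2.5 GHz host */

        /* Reference result in 128-bit arithmetic: 0xcccccccccccc (0.8 * 2^48). */
        unsigned __int128 wide = (unsigned __int128)one * gtsc_khz / htsc_khz;

        /* The naive 64-bit product wraps around and gives a bogus ratio. */
        uint64_t naive = one * gtsc_khz / htsc_khz;

        printf("128-bit: %#llx  naive 64-bit: %#llx\n",
               (unsigned long long)wide, (unsigned long long)naive);
        return 0;
    }

The 128-bit reference comes out as 0xcccccccccccc, while the naive 64-bit product wraps and yields a completely different value, which is exactly what the comment above is guarding against.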
void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc)
{
uint64_t tsc;
@@ -2024,6 +2048,9 @@ static int hvm_load_cpu_ctxt(struct domain *d, hvm_domain_context_t *h)
if ( hvm_funcs.load_cpu_ctxt(v, &ctxt) < 0 )
return -EINVAL;
+ if ( hvm_funcs.tsc_scaling_supported )
+ hvm_setup_tsc_scaling(v);
+
I think you can drop the test here (and below) since you do the same
check inside hvm_setup_tsc_scaling().
v->arch.hvm_vcpu.msr_tsc_aux = ctxt.msr_tsc_aux;
seg.limit = ctxt.idtr_limit;
@@ -5584,6 +5611,9 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
hvm_set_segment_register(v, x86_seg_gdtr, &reg);
hvm_set_segment_register(v, x86_seg_idtr, &reg);
+ if ( hvm_funcs.tsc_scaling_supported )
+ hvm_setup_tsc_scaling(v);
+
/* Sync AP's TSC with BSP's. */
v->arch.hvm_vcpu.cache_tsc_offset =
v->domain->vcpu[0]->arch.hvm_vcpu.cache_tsc_offset;
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index 9f741d0..d291327 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -811,7 +811,7 @@ static uint64_t svm_scale_tsc(struct vcpu *v, uint64_t tsc)
if ( !cpu_has_tsc_ratio || d->arch.vtsc )
return tsc;
- return scale_tsc(tsc, vcpu_tsc_ratio(v));
+ return scale_tsc(tsc, v->arch.hvm_vcpu.tsc_scaling_ratio);
}
static uint64_t svm_get_tsc_offset(uint64_t host_tsc, uint64_t guest_tsc,
@@ -987,7 +987,7 @@ static inline void svm_tsc_ratio_save(struct vcpu *v)
static inline void svm_tsc_ratio_load(struct vcpu *v)
{
if ( cpu_has_tsc_ratio && !v->domain->arch.vtsc )
- wrmsrl(MSR_AMD64_TSC_RATIO, vcpu_tsc_ratio(v));
+ wrmsrl(MSR_AMD64_TSC_RATIO, v->arch.hvm_vcpu.tsc_scaling_ratio);
}
static void svm_ctxt_switch_from(struct vcpu *v)
@@ -1193,6 +1193,8 @@ static int svm_vcpu_initialise(struct vcpu *v)
svm_guest_osvw_init(v);
+ v->arch.hvm_vcpu.tsc_scaling_ratio = DEFAULT_TSC_RATIO;
+
return 0;
}
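For what it's worth (my paraphrase, not code from the patch): the AMD ratio that DEFAULT_TSC_RATIO encodes is an 8.32 fixed-point value, so 1ULL << 32 means a ratio of exactly 1.0, and applying it is conceptually the sketch below; scale_tsc() in svm.c computes the same thing without needing a 128-bit type.

    #include <stdint.h>

    /*
     * Hypothetical helper, only to spell out the 8.32 fixed-point semantics:
     * DEFAULT_TSC_RATIO == 1ULL << 32 == 1.0, and a guest clocked at half
     * the host frequency would get ratio 1ULL << 31.
     */
    static inline uint64_t scale_tsc_sketch(uint64_t host_tsc, uint64_t ratio)
    {
        return (uint64_t)(((unsigned __int128)host_tsc * ratio) >> 32);
    }

So, as far as I can tell, initialising tsc_scaling_ratio to DEFAULT_TSC_RATIO in svm_vcpu_initialise() keeps the old no-scaling behaviour until tsc_set_info() installs the real ratio.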
diff --git a/xen/arch/x86/time.c b/xen/arch/x86/time.c
index 732d1e9..d4a94eb 100644
--- a/xen/arch/x86/time.c
+++ b/xen/arch/x86/time.c
@@ -1902,8 +1902,19 @@ void tsc_set_info(struct domain *d,
if ( has_hvm_container_domain(d) )
{
hvm_set_rdtsc_exiting(d, d->arch.vtsc);
- if ( d->vcpu && d->vcpu[0] && incarnation == 0 )
+ if ( d->vcpu && d->vcpu[0] )
{
+ /*
+ * TSC scaling ratio on BSP is set here during initial boot, while
During domain creation rather than during boot.
+ * the same TSC scaling ratio on APs will be set in
+ * hvm_vcpu_reset_state().
+ */
+ if ( !d->arch.vtsc && hvm_funcs.tsc_scaling_supported )
+ hvm_setup_tsc_scaling(d->vcpu[0]);
+
+ if ( incarnation )
+ return;
+
/*
* set_tsc_offset() is called from hvm_vcpu_initialise() before
* tsc_set_info(). New vtsc mode may require recomputing TSC
diff --git a/xen/include/asm-x86/hvm/hvm.h b/xen/include/asm-x86/hvm/hvm.h
index 8b10a67..e74524e 100644
--- a/xen/include/asm-x86/hvm/hvm.h
+++ b/xen/include/asm-x86/hvm/hvm.h
@@ -226,6 +226,9 @@ struct hvm_function_table {
int (*altp2m_vcpu_emulate_vmfunc)(struct cpu_user_regs *regs);
uint64_t (*scale_tsc)(struct vcpu *v, uint64_t tsc);
+
+ /* Architecture function to setup TSC scaling ratio */
+ void (*setup_tsc_scaling)(struct vcpu *v);
};
extern struct hvm_function_table hvm_funcs;
@@ -261,6 +264,8 @@ void hvm_set_guest_tsc_fixed(struct vcpu *v, u64 guest_tsc, u64 at_tsc);
u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc);
#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)
+void hvm_setup_tsc_scaling(struct vcpu *v);
+
int hvm_set_mode(struct vcpu *v, int mode);
void hvm_init_guest_time(struct domain *d);
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
diff --git a/xen/include/asm-x86/hvm/svm/svm.h b/xen/include/asm-x86/hvm/svm/svm.h
index d60ec23..c954b7e 100644
--- a/xen/include/asm-x86/hvm/svm/svm.h
+++ b/xen/include/asm-x86/hvm/svm/svm.h
@@ -97,9 +97,6 @@ extern u32 svm_feature_flags;
/* TSC rate */
#define DEFAULT_TSC_RATIO 0x0000000100000000ULL
#define TSC_RATIO_RSVD_BITS 0xffffff0000000000ULL
-#define TSC_RATIO(g_khz, h_khz) ( (((u64)(g_khz)<<32)/(u64)(h_khz)) & \
- ~TSC_RATIO_RSVD_BITS )
-#define vcpu_tsc_ratio(v) TSC_RATIO((v)->domain->arch.tsc_khz, cpu_khz)
extern void svm_host_osvw_reset(void);
extern void svm_host_osvw_init(void);
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 152d9f3..901c988 100644
--- a/xen/include/asm-x86/hvm/vcpu.h
+++ b/xen/include/asm-x86/hvm/vcpu.h
@@ -175,6 +175,8 @@ struct hvm_vcpu {
u64 msr_tsc_adjust;
u64 msr_xss;
+ u64 tsc_scaling_ratio;
+
union {
struct arch_vmx_struct vmx;
struct arch_svm_struct svm;
diff --git a/xen/include/asm-x86/math64.h b/xen/include/asm-x86/math64.h
new file mode 100644
index 0000000..9af6aee
--- /dev/null
+++ b/xen/include/asm-x86/math64.h
@@ -0,0 +1,30 @@
+#ifndef __X86_MATH64
+#define __X86_MATH64
+
+/*
+ * (a * mul) / divisor
+ */
+static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
Since this is taken from Linux code, I think it's worth mentioning so.
-boris
+{
+ union {
+ u64 ll;
+ struct {
+ u32 low, high;
+ } l;
+ } u, rl, rh;
+
+ u.ll = a;
+ rl.ll = (u64)u.l.low * mul;
+ rh.ll = (u64)u.l.high * mul + rl.l.high;
+
+ /* Bits 32-63 of the result will be in rh.l.low. */
+ rl.l.high = do_div(rh.ll, divisor);
+
+ /* Bits 0-31 of the result will be in rl.l.low. */
+ do_div(rl.ll, divisor);
+
+ rl.l.high = rh.l.low;
+ return rl.ll;
+}
+
+#endif
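A quick way to convince oneself the helper is right is to cross-check it against 128-bit arithmetic. Standalone test sketch (my own, not part of the patch; the do_div() below is a local stand-in for Xen's macro with the same divide-in-place / return-remainder contract, and the union layout assumes little-endian x86 as in the patch):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint64_t u64;
    typedef uint32_t u32;

    /* Local stand-in for Xen's do_div(): divide n in place, return remainder. */
    #define do_div(n, base) ({ u32 _rem = (n) % (base); (n) /= (base); _rem; })

    static inline u64 mul_u64_u32_div(u64 a, u32 mul, u32 divisor)
    {
        union {
            u64 ll;
            struct {
                u32 low, high;   /* little-endian layout assumed */
            } l;
        } u, rl, rh;

        u.ll = a;
        rl.ll = (u64)u.l.low * mul;
        rh.ll = (u64)u.l.high * mul + rl.l.high;

        /* Bits 32-63 of the result will be in rh.l.low. */
        rl.l.high = do_div(rh.ll, divisor);

        /* Bits 0-31 of the result will be in rl.l.low. */
        do_div(rl.ll, divisor);

        rl.l.high = rh.l.low;
        return rl.ll;
    }

    int main(void)
    {
        u64 a = 1ULL << 48;
        u32 mul = 2000000, divisor = 2500000;
        u64 ref = (u64)((unsigned __int128)a * mul / divisor);

        /* Both lines should print 0xcccccccccccc for these inputs. */
        printf("helper: %#llx  128-bit reference: %#llx\n",
               (unsigned long long)mul_u64_u32_div(a, mul, divisor),
               (unsigned long long)ref);
        return 0;
    }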