The Intel Processor Trace MSRs (except IA32_RTIT_CTL) are passed through
to the guest when Intel PT is enabled in the guest, so add helper
functions to enable/disable interception of these MSRs.

Signed-off-by: Luwei Kang <luwei.k...@intel.com>
---
 arch/x86/kvm/vmx.c | 68 ++++++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 68 insertions(+)
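
[Note for readers, not part of the patch: the bitmap arithmetic in
__vmx_enable_intercept_for_msr() below mirrors the existing
__vmx_disable_intercept_for_msr(). Each 1KB quarter of the 4KB MSR
bitmap holds one intercept bit per MSR (read-low at byte 0x000,
read-high at 0x400, write-low at 0x800, write-high at 0xc00); a set
bit forces a VM exit, a clear bit passes the access through. A minimal
standalone sketch of the offset math, using IA32_RTIT_STATUS (0x571
per the SDM) as an example:

#include <stdio.h>

int main(void)
{
	unsigned int msr = 0x571;	/* IA32_RTIT_STATUS, low MSR range */

	/* read-low quarter starts at byte 0x000, write-low at 0x800 */
	printf("read intercept bit:  byte 0x%03x, bit %u\n",
	       0x000 + msr / 8, msr % 8);
	printf("write intercept bit: byte 0x%03x, bit %u\n",
	       0x800 + msr / 8, msr % 8);
	return 0;
}

This prints byte 0x0ae, bit 1 for reads and byte 0x8ae, bit 1 for
writes; the kernel code computes the same positions via __set_bit()
on an unsigned long * base, hence the division by f.]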

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index de9e958..e2de089 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -4978,6 +4978,41 @@ static void __vmx_disable_intercept_for_msr(unsigned long *msr_bitmap,
        }
 }
 
+static void __vmx_enable_intercept_for_msr(unsigned long *msr_bitmap,
+                                               u32 msr, int type)
+{
+       int f = sizeof(unsigned long);
+
+       if (!cpu_has_vmx_msr_bitmap())
+               return;
+
+       /*
+        * See Intel PRM Vol. 3, 20.6.9 (MSR-Bitmap Address). Early manuals
+        * have the write-low and read-high bitmap offsets the wrong way round.
+        * We can control MSRs 0x00000000-0x00001fff and 0xc0000000-0xc0001fff.
+        */
+       if (msr <= 0x1fff) {
+               if (type & MSR_TYPE_R)
+                       /* read-low */
+                       __set_bit(msr, msr_bitmap + 0x000 / f);
+
+               if (type & MSR_TYPE_W)
+                       /* write-low */
+                       __set_bit(msr, msr_bitmap + 0x800 / f);
+
+       } else if ((msr >= 0xc0000000) && (msr <= 0xc0001fff)) {
+               msr &= 0x1fff;
+               if (type & MSR_TYPE_R)
+                       /* read-high */
+                       __set_bit(msr, msr_bitmap + 0x400 / f);
+
+               if (type & MSR_TYPE_W)
+                       /* write-high */
+                       __set_bit(msr, msr_bitmap + 0xc00 / f);
+
+       }
+}
+
 /*
  * If a msr is allowed by L0, we should check whether it is allowed by L1.
  * The corresponding bit will be cleared unless both of L0 and L1 allow it.
@@ -5033,6 +5068,39 @@ static void vmx_disable_intercept_for_msr(u32 msr, bool longmode_only)
                                                msr, MSR_TYPE_R | MSR_TYPE_W);
 }
 
+static void vmx_enable_intercept_for_msr(u32 msr, bool longmode_only)
+{
+       if (!longmode_only)
+               __vmx_enable_intercept_for_msr(vmx_msr_bitmap_legacy,
+                                               msr, MSR_TYPE_R | MSR_TYPE_W);
+       __vmx_enable_intercept_for_msr(vmx_msr_bitmap_longmode,
+                                               msr, MSR_TYPE_R | MSR_TYPE_W);
+}
+
+static void pt_disable_intercept_for_msr(bool flag)
+{
+       unsigned int i;
+       unsigned int addr_num = kvm_get_pt_addr_cnt();
+
+       if (flag) {
+               vmx_disable_intercept_for_msr(MSR_IA32_RTIT_STATUS, false);
+               vmx_disable_intercept_for_msr(MSR_IA32_RTIT_OUTPUT_BASE, false);
+               vmx_disable_intercept_for_msr(MSR_IA32_RTIT_OUTPUT_MASK, false);
+               vmx_disable_intercept_for_msr(MSR_IA32_RTIT_CR3_MATCH, false);
+               for (i = 0; i < addr_num; i++)
+                       vmx_disable_intercept_for_msr(MSR_IA32_RTIT_ADDR0_A + i,
+                                                                       false);
+       } else {
+               vmx_enable_intercept_for_msr(MSR_IA32_RTIT_STATUS, false);
+               vmx_enable_intercept_for_msr(MSR_IA32_RTIT_OUTPUT_BASE, false);
+               vmx_enable_intercept_for_msr(MSR_IA32_RTIT_OUTPUT_MASK, false);
+               vmx_enable_intercept_for_msr(MSR_IA32_RTIT_CR3_MATCH, false);
+               for (i = 0; i < addr_num; i++)
+                       vmx_enable_intercept_for_msr(MSR_IA32_RTIT_ADDR0_A + i,
+                                                                       false);
+       }
+}
+
 static void vmx_disable_intercept_msr_x2apic(u32 msr, int type, bool apicv_active)
 {
        if (apicv_active) {
-- 
1.8.3.1
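
[Note, not part of the patch: pt_disable_intercept_for_msr() has no
caller in this hunk, so presumably it gets wired up elsewhere in the
series. A hypothetical call site reacting to a guest write of
IA32_RTIT_CTL might look like the fragment below, where "data" is the
value the guest wrote and RTIT_CTL_TRACEEN is the TraceEn bit (bit 0)
of IA32_RTIT_CTL:

	if (data & RTIT_CTL_TRACEEN)
		/* guest is tracing: let it touch the PT MSRs directly */
		pt_disable_intercept_for_msr(true);
	else
		/* tracing is off: intercept PT MSR accesses again */
		pt_disable_intercept_for_msr(false);

This keeps a tracing guest free of a VM exit per PT MSR access while
an idle guest still has those MSRs intercepted.]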
