Move the guest's PKRU value to struct kvm_vcpu_arch, replacing
guest_pkru_valid with a simple comparison against the host value of the
register.

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
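A compile-and-run sketch (not kernel code) of the switching pattern this
patch converges on, for reviewers: the guest value lives in the vcpu, the
register is written before entry only when it differs from the host value,
and the host value is restored on exit under the same test, so no separate
guest_pkru_valid flag is needed.  rd_pkru()/wr_pkru() below are hypothetical
stand-ins for the kernel's __read_pkru()/__write_pkru(), and the
X86_FEATURE_OSPKE guard is omitted.

/*
 * Conceptual model only: a plain variable stands in for the PKRU
 * register, rd_pkru()/wr_pkru() stand in for __read_pkru()/__write_pkru().
 */
#include <stdio.h>

static unsigned int hw_pkru;			/* models the CPU's PKRU */
static unsigned int rd_pkru(void) { return hw_pkru; }
static void wr_pkru(unsigned int val) { hw_pkru = val; }

struct vcpu_arch { unsigned int pkru; };	/* mirrors vcpu->arch.pkru */

static unsigned int host_pkru;			/* mirrors vmx->host_pkru */

static void before_vmentry(struct vcpu_arch *arch)
{
	/* Load the guest value only if it differs from what is loaded now. */
	if (arch->pkru != host_pkru)
		wr_pkru(arch->pkru);
}

static void after_vmexit(struct vcpu_arch *arch)
{
	/* Latch whatever the guest left in PKRU, then restore the host. */
	arch->pkru = rd_pkru();
	if (arch->pkru != host_pkru)
		wr_pkru(host_pkru);
}

int main(void)
{
	struct vcpu_arch arch = { .pkru = 0x4 };

	host_pkru = rd_pkru();
	before_vmentry(&arch);	/* guest PKRU now in "hardware" */
	after_vmexit(&arch);	/* guest value cached, host PKRU restored */
	printf("guest pkru=%#x hw pkru=%#x\n", arch.pkru, hw_pkru);
	return 0;
}
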
 arch/x86/include/asm/kvm_host.h |  1 +
 arch/x86/kvm/kvm_cache_regs.h   |  5 -----
 arch/x86/kvm/mmu.h              |  2 +-
 arch/x86/kvm/svm.c              |  7 -------
 arch/x86/kvm/vmx.c              | 23 ++++++-----------------
 5 files changed, 8 insertions(+), 30 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 643308143bea..beb185361045 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -491,6 +491,7 @@ struct kvm_vcpu_arch {
        unsigned long cr4;
        unsigned long cr4_guest_owned_bits;
        unsigned long cr8;
+       u32 pkru;
        u32 hflags;
        u64 efer;
        u64 apic_base;
diff --git a/arch/x86/kvm/kvm_cache_regs.h b/arch/x86/kvm/kvm_cache_regs.h
index 762cdf2595f9..e1e89ee4af75 100644
--- a/arch/x86/kvm/kvm_cache_regs.h
+++ b/arch/x86/kvm/kvm_cache_regs.h
@@ -84,11 +84,6 @@ static inline u64 kvm_read_edx_eax(struct kvm_vcpu *vcpu)
                | ((u64)(kvm_register_read(vcpu, VCPU_REGS_RDX) & -1u) << 32);
 }
 
-static inline u32 kvm_read_pkru(struct kvm_vcpu *vcpu)
-{
-       return kvm_x86_ops->get_pkru(vcpu);
-}
-
 static inline void enter_guest_mode(struct kvm_vcpu *vcpu)
 {
        vcpu->arch.hflags |= HF_GUEST_MASK;
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 3ed6192d93b1..1b3095264fcf 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -168,7 +168,7 @@ static inline u8 permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
                * index of the protection domain, so pte_pkey * 2 is
                * the index of the first bit for the domain.
                */
-               pkru_bits = (kvm_read_pkru(vcpu) >> (pte_pkey * 2)) & 3;
+               pkru_bits = (vcpu->arch.pkru >> (pte_pkey * 2)) & 3;
 
                /* clear present bit, replace PFEC.RSVD with ACC_USER_MASK. */
                offset = (pfec & ~1) +
diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index 32c8d8f62985..f2355135164c 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1826,11 +1826,6 @@ static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        to_svm(vcpu)->vmcb->save.rflags = rflags;
 }
 
-static u32 svm_get_pkru(struct kvm_vcpu *vcpu)
-{
-       return 0;
-}
-
 static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
 {
        switch (reg) {
@@ -5438,8 +5433,6 @@ static void svm_setup_mce(struct kvm_vcpu *vcpu)
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
 
-       .get_pkru = svm_get_pkru,
-
        .tlb_flush = svm_flush_tlb,
 
        .run = svm_vcpu_run,
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index ddabed8425b3..c603f658c42a 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -639,8 +639,6 @@ struct vcpu_vmx {
 
        u64 current_tsc_ratio;
 
-       bool guest_pkru_valid;
-       u32 guest_pkru;
        u32 host_pkru;
 
        /*
@@ -2392,11 +2390,6 @@ static void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
                to_vmx(vcpu)->emulation_required = emulation_required(vcpu);
 }
 
-static u32 vmx_get_pkru(struct kvm_vcpu *vcpu)
-{
-       return to_vmx(vcpu)->guest_pkru;
-}
-
 static u32 vmx_get_interrupt_shadow(struct kvm_vcpu *vcpu)
 {
        u32 interruptibility = vmcs_read32(GUEST_INTERRUPTIBILITY_INFO);
@@ -9239,8 +9232,9 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vmx_set_interrupt_shadow(vcpu, 0);
 
-       if (vmx->guest_pkru_valid)
-               __write_pkru(vmx->guest_pkru);
+       if (static_cpu_has(X86_FEATURE_OSPKE) &&
+           vcpu->arch.pkru != vmx->host_pkru)
+               __write_pkru(vcpu->arch.pkru);
 
        atomic_switch_perf_msrs(vmx);
        debugctlmsr = get_debugctlmsr();
@@ -9388,13 +9382,10 @@ static void __noclone vmx_vcpu_run(struct kvm_vcpu *vcpu)
         * back on host, so it is safe to read guest PKRU from current
         * XSAVE.
         */
-       if (boot_cpu_has(X86_FEATURE_OSPKE)) {
-               vmx->guest_pkru = __read_pkru();
-               if (vmx->guest_pkru != vmx->host_pkru) {
-                       vmx->guest_pkru_valid = true;
+       if (static_cpu_has(X86_FEATURE_OSPKE)) {
+               vcpu->arch.pkru = __read_pkru();
+               if (vcpu->arch.pkru != vmx->host_pkru)
                        __write_pkru(vmx->host_pkru);
-               } else
-                       vmx->guest_pkru_valid = false;
        }
 
        /*
@@ -11875,8 +11866,6 @@ static void vmx_setup_mce(struct kvm_vcpu *vcpu)
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
 
-       .get_pkru = vmx_get_pkru,
-
        .tlb_flush = vmx_flush_tlb,
 
        .run = vmx_vcpu_run,
-- 
1.8.3.1

