This is a preparation for a later change, which will use vmcs12_read()
and vmcs12_write() to access vmcs12 fields.

Since the above functions take 'vcpu' as a parameter, we also use
'vcpu' as the parameter in the nested_cpu_has_xxx() functions.

Signed-off-by: Dongxiao Xu <[email protected]>
---
 arch/x86/kvm/vmx.c |   57 +++++++++++++++++++++++++--------------------------
 1 files changed, 28 insertions(+), 29 deletions(-)

diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index d8670e4..b036f9c 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -884,22 +884,22 @@ static inline bool report_flexpriority(void)
        return flexpriority_enabled;
 }
 
-static inline bool nested_cpu_has(struct vmcs12 *vmcs12, u32 bit)
+static inline bool nested_cpu_has(struct kvm_vcpu *vcpu, u32 bit)
 {
-       return vmcs12->cpu_based_vm_exec_control & bit;
+       return get_vmcs12(vcpu)->cpu_based_vm_exec_control & bit;
 }
 
-static inline bool nested_cpu_has2(struct vmcs12 *vmcs12, u32 bit)
+static inline bool nested_cpu_has2(struct kvm_vcpu *vcpu, u32 bit)
 {
-       return (vmcs12->cpu_based_vm_exec_control &
+       return (get_vmcs12(vcpu)->cpu_based_vm_exec_control &
                        CPU_BASED_ACTIVATE_SECONDARY_CONTROLS) &&
-               (vmcs12->secondary_vm_exec_control & bit);
+               (get_vmcs12(vcpu)->secondary_vm_exec_control & bit);
 }
 
-static inline bool nested_cpu_has_virtual_nmis(struct vmcs12 *vmcs12,
-       struct kvm_vcpu *vcpu)
+static inline bool nested_cpu_has_virtual_nmis(struct kvm_vcpu *vcpu)
 {
-       return vmcs12->pin_based_vm_exec_control & PIN_BASED_VIRTUAL_NMIS;
+       return get_vmcs12(vcpu)->pin_based_vm_exec_control &
+               PIN_BASED_VIRTUAL_NMIS;
 }
 
 static inline bool is_exception(u32 intr_info)
@@ -1883,7 +1883,7 @@ static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
                /* recalculate vmcs02.TSC_OFFSET: */
                vmcs12 = get_vmcs12(vcpu);
                vmcs_write64(TSC_OFFSET, offset +
-                       (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING) ?
+                       (nested_cpu_has(vcpu, CPU_BASED_USE_TSC_OFFSETING) ?
                         vmcs12->tsc_offset : 0));
        } else {
                vmcs_write64(TSC_OFFSET, offset);
@@ -5719,7 +5719,7 @@ static bool nested_vmx_exit_handled_msr(struct kvm_vcpu *vcpu,
        u32 msr_index = vcpu->arch.regs[VCPU_REGS_RCX];
        gpa_t bitmap;
 
-       if (!nested_cpu_has(get_vmcs12(vcpu), CPU_BASED_USE_MSR_BITMAPS))
+       if (!nested_cpu_has(vcpu, CPU_BASED_USE_MSR_BITMAPS))
                return 1;
 
        /*
@@ -5775,7 +5775,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
                                (vmcs12->cr3_target_count >= 4 &&
                                        vmcs12->cr3_target_value3 == val))
                                return 0;
-                       if (nested_cpu_has(vmcs12, CPU_BASED_CR3_LOAD_EXITING))
+                       if (nested_cpu_has(vcpu, CPU_BASED_CR3_LOAD_EXITING))
                                return 1;
                        break;
                case 4:
@@ -5784,7 +5784,7 @@ static bool nested_vmx_exit_handled_cr(struct kvm_vcpu *vcpu,
                                return 1;
                        break;
                case 8:
-                       if (nested_cpu_has(vmcs12, CPU_BASED_CR8_LOAD_EXITING))
+                       if (nested_cpu_has(vcpu, CPU_BASED_CR8_LOAD_EXITING))
                                return 1;
                        break;
                }
@@ -5872,15 +5872,15 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
        case EXIT_REASON_CPUID:
                return 1;
        case EXIT_REASON_HLT:
-               return nested_cpu_has(vmcs12, CPU_BASED_HLT_EXITING);
+               return nested_cpu_has(vcpu, CPU_BASED_HLT_EXITING);
        case EXIT_REASON_INVD:
                return 1;
        case EXIT_REASON_INVLPG:
-               return nested_cpu_has(vmcs12, CPU_BASED_INVLPG_EXITING);
+               return nested_cpu_has(vcpu, CPU_BASED_INVLPG_EXITING);
        case EXIT_REASON_RDPMC:
-               return nested_cpu_has(vmcs12, CPU_BASED_RDPMC_EXITING);
+               return nested_cpu_has(vcpu, CPU_BASED_RDPMC_EXITING);
        case EXIT_REASON_RDTSC:
-               return nested_cpu_has(vmcs12, CPU_BASED_RDTSC_EXITING);
+               return nested_cpu_has(vcpu, CPU_BASED_RDTSC_EXITING);
        case EXIT_REASON_VMCALL: case EXIT_REASON_VMCLEAR:
        case EXIT_REASON_VMLAUNCH: case EXIT_REASON_VMPTRLD:
        case EXIT_REASON_VMPTRST: case EXIT_REASON_VMREAD:
@@ -5894,7 +5894,7 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
        case EXIT_REASON_CR_ACCESS:
                return nested_vmx_exit_handled_cr(vcpu, vmcs12);
        case EXIT_REASON_DR_ACCESS:
-               return nested_cpu_has(vmcs12, CPU_BASED_MOV_DR_EXITING);
+               return nested_cpu_has(vcpu, CPU_BASED_MOV_DR_EXITING);
        case EXIT_REASON_IO_INSTRUCTION:
                /* TODO: support IO bitmaps */
                return 1;
@@ -5904,25 +5904,26 @@ static bool nested_vmx_exit_handled(struct kvm_vcpu *vcpu)
        case EXIT_REASON_INVALID_STATE:
                return 1;
        case EXIT_REASON_MWAIT_INSTRUCTION:
-               return nested_cpu_has(vmcs12, CPU_BASED_MWAIT_EXITING);
+               return nested_cpu_has(vcpu, CPU_BASED_MWAIT_EXITING);
        case EXIT_REASON_MONITOR_INSTRUCTION:
-               return nested_cpu_has(vmcs12, CPU_BASED_MONITOR_EXITING);
+               return nested_cpu_has(vcpu, CPU_BASED_MONITOR_EXITING);
        case EXIT_REASON_PAUSE_INSTRUCTION:
-               return nested_cpu_has(vmcs12, CPU_BASED_PAUSE_EXITING) ||
-                       nested_cpu_has2(vmcs12,
-                               SECONDARY_EXEC_PAUSE_LOOP_EXITING);
+               return nested_cpu_has(vcpu, CPU_BASED_PAUSE_EXITING) ||
+                       nested_cpu_has2(vcpu,
+                                       SECONDARY_EXEC_PAUSE_LOOP_EXITING);
        case EXIT_REASON_MCE_DURING_VMENTRY:
                return 0;
        case EXIT_REASON_TPR_BELOW_THRESHOLD:
                return 1;
        case EXIT_REASON_APIC_ACCESS:
-               return nested_cpu_has2(vmcs12,
+               return nested_cpu_has2(vcpu,
                        SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES);
        case EXIT_REASON_EPT_VIOLATION:
        case EXIT_REASON_EPT_MISCONFIG:
                return 0;
        case EXIT_REASON_WBINVD:
-               return nested_cpu_has2(vmcs12, SECONDARY_EXEC_WBINVD_EXITING);
+               return nested_cpu_has2(vcpu,
+                       SECONDARY_EXEC_WBINVD_EXITING);
        case EXIT_REASON_XSETBV:
                return 1;
        default:
@@ -5992,8 +5993,7 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu)
                       __func__, vectoring_info, exit_reason);
 
        if (unlikely(!cpu_has_virtual_nmis() && vmx->soft_vnmi_blocked &&
-           !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(
-                                       get_vmcs12(vcpu), vcpu)))) {
+           !(is_guest_mode(vcpu) && nested_cpu_has_virtual_nmis(vcpu)))) {
                if (vmx_interrupt_allowed(vcpu)) {
                        vmx->soft_vnmi_blocked = 0;
                } else if (vmx->vnmi_blocked_time > 1000000000LL &&
@@ -6686,8 +6686,7 @@ static void prepare_vmcs02(struct kvm_vcpu *vcpu, struct vmcs12 *vmcs12)
                        exec_control &= ~SECONDARY_EXEC_RDTSCP;
                /* Take the following fields only from vmcs12 */
                exec_control &= ~SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES;
-               if (nested_cpu_has(vmcs12,
-                               CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
+               if (nested_cpu_has(vcpu, CPU_BASED_ACTIVATE_SECONDARY_CONTROLS))
                        exec_control |= vmcs12->secondary_vm_exec_control;
 
                if (exec_control & SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) {
@@ -6862,7 +6861,7 @@ static int nested_vmx_run(struct kvm_vcpu *vcpu, bool launch)
                return 1;
        }
 
-       if (nested_cpu_has2(vmcs12, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
+       if (nested_cpu_has2(vcpu, SECONDARY_EXEC_VIRTUALIZE_APIC_ACCESSES) &&
                        !IS_ALIGNED(vmcs12->apic_access_addr, PAGE_SIZE)) {
                /*TODO: Also verify bits beyond physical address width are 0*/
                nested_vmx_failValid(vcpu, VMXERR_ENTRY_INVALID_CONTROL_FIELD);
-- 
1.7.1

--
To unsubscribe from this list: send the line "unsubscribe kvm" in
the body of a message to [email protected]
More majordomo info at  http://vger.kernel.org/majordomo-info.html

Reply via email to