We are going to use it for SVM too, so use a more generic name.

Signed-off-by: Paolo Bonzini <pbonz...@redhat.com>
---
 arch/x86/include/asm/kvm_host.h | 4 ++--
 arch/x86/kvm/vmx/nested.c       | 8 ++++----
 arch/x86/kvm/x86.c              | 4 ++--
 3 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 5303dbc5c9bc..c12babf6377c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -80,7 +80,7 @@
 #define KVM_REQ_HV_EXIT                        KVM_ARCH_REQ(21)
 #define KVM_REQ_HV_STIMER              KVM_ARCH_REQ(22)
 #define KVM_REQ_LOAD_EOI_EXITMAP       KVM_ARCH_REQ(23)
-#define KVM_REQ_GET_VMCS12_PAGES       KVM_ARCH_REQ(24)
+#define KVM_REQ_GET_NESTED_STATE_PAGES KVM_ARCH_REQ(24)
 #define KVM_REQ_APICV_UPDATE \
        KVM_ARCH_REQ_FLAGS(25, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
 #define KVM_REQ_TLB_FLUSH_CURRENT      KVM_ARCH_REQ(26)
@@ -1238,7 +1238,7 @@ struct kvm_x86_nested_ops {
        int (*set_state)(struct kvm_vcpu *vcpu,
                         struct kvm_nested_state __user *user_kvm_nested_state,
                         struct kvm_nested_state *kvm_state);
-       bool (*get_vmcs12_pages)(struct kvm_vcpu *vcpu);
+       bool (*get_nested_state_pages)(struct kvm_vcpu *vcpu);
        int (*write_log_dirty)(struct kvm_vcpu *vcpu, gpa_t l2_gpa);
 
        int (*enable_evmcs)(struct kvm_vcpu *vcpu,
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 1bb6b31eb646..7a4dc5abd7e2 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -244,7 +244,7 @@ static void free_nested(struct kvm_vcpu *vcpu)
        if (!vmx->nested.vmxon && !vmx->nested.smm.vmxon)
                return;
 
-       kvm_clear_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+       kvm_clear_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
 
        vmx->nested.vmxon = false;
        vmx->nested.smm.vmxon = false;
@@ -3387,7 +3387,7 @@ enum nvmx_vmentry_status nested_vmx_enter_non_root_mode(struct kvm_vcpu *vcpu,
                 * to nested_get_vmcs12_pages before the next VM-entry.  The MSRs
                 * have already been set at vmentry time and should not be reset.
                 */
-               kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+               kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        }
 
        /*
@@ -6182,7 +6182,7 @@ static int vmx_set_nested_state(struct kvm_vcpu *vcpu,
                 * restored yet. EVMCS will be mapped from
                 * nested_get_vmcs12_pages().
                 */
-               kvm_make_request(KVM_REQ_GET_VMCS12_PAGES, vcpu);
+               kvm_make_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu);
        } else {
                return -EINVAL;
        }
@@ -6561,7 +6561,7 @@ struct kvm_x86_nested_ops vmx_nested_ops = {
        .hv_timer_pending = nested_vmx_preemption_timer_pending,
        .get_state = vmx_get_nested_state,
        .set_state = vmx_set_nested_state,
-       .get_vmcs12_pages = nested_get_vmcs12_pages,
+       .get_nested_state_pages = nested_get_vmcs12_pages,
        .write_log_dirty = nested_vmx_write_pml_buffer,
        .enable_evmcs = nested_enable_evmcs,
        .get_evmcs_version = nested_get_evmcs_version,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 1994602a0851..92ead1782d57 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8365,8 +8365,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        bool req_immediate_exit = false;
 
        if (kvm_request_pending(vcpu)) {
-               if (kvm_check_request(KVM_REQ_GET_VMCS12_PAGES, vcpu)) {
-                       if (unlikely(!kvm_x86_ops.nested_ops->get_vmcs12_pages(vcpu))) {
+               if (kvm_check_request(KVM_REQ_GET_NESTED_STATE_PAGES, vcpu)) {
+                       if 
(unlikely(!kvm_x86_ops.nested_ops->get_nested_state_pages(vcpu))) {
                                r = 0;
                                goto out;
                        }
-- 
2.26.2


Reply via email to