From: Grygorii Strashko <[email protected]>

Currently Xen supports only two paging modes, HAP and SHADOW, so
!paging_mode_hap() actually means paging_mode_shadow().

For the hypothetical future case of a 3rd paging mode being added, it is
also better to explicitly name the paging mode being checked for (SHADOW)
instead of using a negative check on another paging mode (HAP).

Hence, s/!paging_mode_hap()/paging_mode_shadow()/, which also allows DCE to
drop the unused code when SHADOW_PAGING=n.
The !paging_mode_hap() checks in hap.c are left unchanged, as there the HAP
code is checking for itself being enabled.
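
As a rough illustration of why DCE kicks in with the positive check (a
simplified sketch based on the PG_SH_enable definition in asm/paging.h;
do_shadow_work() is a made-up placeholder):

    /* asm/paging.h, simplified */
    #ifdef CONFIG_SHADOW_PAGING
    #define PG_SH_enable  (1U << PG_SH_shift)
    #else
    #define PG_SH_enable  0                    /* SHADOW_PAGING=n */
    #endif

    #define paging_mode_shadow(d) (!!((d)->arch.paging.mode & PG_SH_enable))

    /* With SHADOW_PAGING=n the predicate folds to constant false, so the
     * compiler can drop the guarded block entirely: */
    if ( paging_mode_shadow(d) )
        do_shadow_work(d);

    /* !paging_mode_hap(d) tests PG_HAP_enable, whose mask is always
     * non-zero, so the equivalent negative check stays a runtime test. */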

Inspired by [1].

[1] https://patchwork.kernel.org/project/xen-devel/patch/[email protected]/
Signed-off-by: Grygorii Strashko <[email protected]>
---
 xen/arch/x86/cpu/mcheck/vmce.c   |  2 +-
 xen/arch/x86/hvm/domain.c        |  2 +-
 xen/arch/x86/hvm/hvm.c           | 10 +++++-----
 xen/arch/x86/hvm/svm/nestedsvm.c |  2 +-
 xen/arch/x86/hvm/svm/svm.c       |  8 ++++----
 xen/arch/x86/hvm/vmx/vmcs.c      |  2 +-
 xen/arch/x86/hvm/vmx/vmx.c       |  4 ++--
 xen/arch/x86/mm/p2m-ept.c        |  2 +-
 8 files changed, 16 insertions(+), 16 deletions(-)

diff --git a/xen/arch/x86/cpu/mcheck/vmce.c b/xen/arch/x86/cpu/mcheck/vmce.c
index 5abdf4cb5fd5..1a7e92506ac8 100644
--- a/xen/arch/x86/cpu/mcheck/vmce.c
+++ b/xen/arch/x86/cpu/mcheck/vmce.c
@@ -525,7 +525,7 @@ int unmmap_broken_page(struct domain *d, mfn_t mfn, unsigned long gfn)
     if ( !mfn_valid(mfn) )
         return -EINVAL;
 
-    if ( !is_hvm_domain(d) || !paging_mode_hap(d) )
+    if ( !is_hvm_domain(d) || paging_mode_shadow(d) )
         return -EOPNOTSUPP;
 
     rc = -1;
diff --git a/xen/arch/x86/hvm/domain.c b/xen/arch/x86/hvm/domain.c
index 048f29ae4911..37092f31f3f7 100644
--- a/xen/arch/x86/hvm/domain.c
+++ b/xen/arch/x86/hvm/domain.c
@@ -287,7 +287,7 @@ int arch_set_info_hvm_guest(struct vcpu *v, const struct vcpu_hvm_context *ctx)
     hvm_update_guest_cr(v, 4);
     hvm_update_guest_efer(v);
 
-    if ( hvm_paging_enabled(v) && !paging_mode_hap(v->domain) )
+    if ( hvm_paging_enabled(v) && paging_mode_shadow(v->domain) )
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
         struct page_info *page = get_page_from_gfn(v->domain,
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 7299cfa90ad5..cb4924be9aff 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2317,7 +2317,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
             hvm_update_guest_efer(v);
         }
 
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
         {
             /* The guest CR3 must be pointing to the guest physical. */
             gfn = v->arch.hvm.guest_cr[3] >> PAGE_SHIFT;
@@ -2368,7 +2368,7 @@ int hvm_set_cr0(unsigned long value, bool may_defer)
             hvm_update_guest_efer(v);
         }
 
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
         {
             put_page(pagetable_get_page(v->arch.guest_table));
             v->arch.guest_table = pagetable_null();
@@ -2422,7 +2422,7 @@ int hvm_set_cr3(unsigned long value, bool noflush, bool may_defer)
         }
     }
 
-    if ( hvm_paging_enabled(curr) && !paging_mode_hap(currd) &&
+    if ( hvm_paging_enabled(curr) && paging_mode_shadow(currd) &&
          ((value ^ curr->arch.hvm.guest_cr[3]) >> PAGE_SHIFT) )
     {
         /* Shadow-mode CR3 change. Check PDBR and update refcounts. */
@@ -3966,7 +3966,7 @@ void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip)
     if ( v->is_initialised )
         goto out;
 
-    if ( !paging_mode_hap(d) )
+    if ( paging_mode_shadow(d) )
     {
         if ( v->arch.hvm.guest_cr[0] & X86_CR0_PG )
             put_page(pagetable_get_page(v->arch.guest_table));
@@ -4241,7 +4241,7 @@ static int hvm_set_param(struct domain *d, uint32_t index, uint64_t value)
          * Only actually required for VT-x lacking unrestricted_guest
          * capabilities.  Short circuit the pause if possible.
          */
-        if ( !paging_mode_hap(d) || !using_vmx() )
+        if ( paging_mode_shadow(d) || !using_vmx() )
         {
             d->arch.hvm.params[index] = value;
             break;
diff --git a/xen/arch/x86/hvm/svm/nestedsvm.c b/xen/arch/x86/hvm/svm/nestedsvm.c
index dc2b6a42534a..1813692ffb03 100644
--- a/xen/arch/x86/hvm/svm/nestedsvm.c
+++ b/xen/arch/x86/hvm/svm/nestedsvm.c
@@ -1341,7 +1341,7 @@ nestedsvm_check_intercepts(struct vcpu *v, struct cpu_user_regs *regs,
             /* l2 guest intercepts #PF unnecessarily */
             return NESTEDHVM_VMEXIT_INJECT;
         }
-        if ( !paging_mode_hap(v->domain) )
+        if ( paging_mode_shadow(v->domain) )
             /* host shadow paging + guest shadow paging */
             return NESTEDHVM_VMEXIT_HOST;
 
diff --git a/xen/arch/x86/hvm/svm/svm.c b/xen/arch/x86/hvm/svm/svm.c
index b54f9d9af50e..73d6c7ed03f2 100644
--- a/xen/arch/x86/hvm/svm/svm.c
+++ b/xen/arch/x86/hvm/svm/svm.c
@@ -171,7 +171,7 @@ static void cf_check svm_update_guest_cr(
         }
 
         value = v->arch.hvm.guest_cr[0] | hw_cr0_mask;
-        if ( !paging_mode_hap(v->domain) )
+        if ( paging_mode_shadow(v->domain) )
             value |= X86_CR0_PG | X86_CR0_WP;
         vmcb_set_cr0(vmcb, value);
         break;
@@ -440,7 +440,7 @@ static int svm_vmcb_restore(struct vcpu *v, struct hvm_hw_cpu *c)
         }
     }
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
     {
         if ( c->cr0 & X86_CR0_PG )
         {
@@ -762,7 +762,7 @@ static int cf_check svm_set_guest_pat(struct vcpu *v, u64 gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
         return 0;
 
     vmcb_set_g_pat(vmcb, gpat);
@@ -773,7 +773,7 @@ static int cf_check svm_get_guest_pat(struct vcpu *v, u64 *gpat)
 {
     struct vmcb_struct *vmcb = v->arch.hvm.svm.vmcb;
 
-    if ( !paging_mode_hap(v->domain) )
+    if ( paging_mode_shadow(v->domain) )
         return 0;
 
     *gpat = vmcb_get_g_pat(vmcb);
diff --git a/xen/arch/x86/hvm/vmx/vmcs.c b/xen/arch/x86/hvm/vmx/vmcs.c
index 2fffc2006ab0..0b511939ac8a 100644
--- a/xen/arch/x86/hvm/vmx/vmcs.c
+++ b/xen/arch/x86/hvm/vmx/vmcs.c
@@ -135,7 +135,7 @@ static int cf_check parse_ept_param_runtime(const char *s)
     for_each_domain ( d )
     {
         /* PV, or HVM Shadow domain?  Not applicable. */
-        if ( !paging_mode_hap(d) )
+        if ( paging_mode_shadow(d) )
             continue;
 
         /* Hardware domain? Not applicable. */
diff --git a/xen/arch/x86/hvm/vmx/vmx.c b/xen/arch/x86/hvm/vmx/vmx.c
index 3f0e113b0b67..c98ec110d144 100644
--- a/xen/arch/x86/hvm/vmx/vmx.c
+++ b/xen/arch/x86/hvm/vmx/vmx.c
@@ -1432,7 +1432,7 @@ static void cf_check vmx_set_segment_register(
 
 static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 {
-    if ( !paging_mode_hap(v->domain) ||
+    if ( paging_mode_shadow(v->domain) ||
          unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
         return 0;
 
@@ -1444,7 +1444,7 @@ static int cf_check vmx_set_guest_pat(struct vcpu *v, u64 gpat)
 
 static int cf_check vmx_get_guest_pat(struct vcpu *v, u64 *gpat)
 {
-    if ( !paging_mode_hap(v->domain) ||
+    if ( paging_mode_shadow(v->domain) ||
          unlikely(v->arch.hvm.vmx.cache_mode == CACHE_MODE_NO_FILL) )
         return 0;
 
diff --git a/xen/arch/x86/mm/p2m-ept.c b/xen/arch/x86/mm/p2m-ept.c
index b854a08b4c4f..ce4ef632ae0a 100644
--- a/xen/arch/x86/mm/p2m-ept.c
+++ b/xen/arch/x86/mm/p2m-ept.c
@@ -1270,7 +1270,7 @@ void ept_sync_domain(struct p2m_domain *p2m)
     struct domain *d = p2m->domain;
 
     /* Only if using EPT and this domain has some VCPUs to dirty. */
-    if ( !paging_mode_hap(d) || !d->vcpu || !d->vcpu[0] )
+    if ( paging_mode_shadow(d) || !d->vcpu || !d->vcpu[0] )
         return;
 
     ept_sync_domain_prepare(p2m);
-- 
2.34.1
