Rename KVM's accessor for retrieving a 'struct kvm_mmu_page' from the
associated host physical address, i.e. rename page_header() to
to_shadow_page(), to better convey what the function is doing.

Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
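Note for reviewers: the helper itself is untouched apart from the name.
Since the return statement isn't visible in the hunk context below, a
rough sketch of the body as I recall it from mmu_internal.h (it boils
down to a page_private() lookup):

	static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
	{
		/*
		 * The kvm_mmu_page is stashed in page_private() when the
		 * shadow page is allocated; this converts the HPA of a
		 * shadow page table back to its tracking structure.
		 */
		struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);

		return (struct kvm_mmu_page *)page_private(page);
	}

Callers pass in the masked SPTE or root HPA, e.g.

	sp = to_shadow_page(root & PT64_BASE_ADDR_MASK);
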
 arch/x86/kvm/mmu/mmu.c          | 20 ++++++++++----------
 arch/x86/kvm/mmu/mmu_audit.c    |  6 +++---
 arch/x86/kvm/mmu/mmu_internal.h |  4 ++--
 3 files changed, 15 insertions(+), 15 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index cd1f8017de8a..258334b4e563 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2208,7 +2208,7 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
                        continue;
                }
 
-               child = page_header(ent & PT64_BASE_ADDR_MASK);
+               child = to_shadow_page(ent & PT64_BASE_ADDR_MASK);
 
                if (child->unsync_children) {
                        if (mmu_pages_add(pvec, child, i))
@@ -2656,7 +2656,7 @@ static void validate_direct_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                 * so we should update the spte at this point to get
                 * a new sp with the correct access.
                 */
-               child = page_header(*sptep & PT64_BASE_ADDR_MASK);
+               child = to_shadow_page(*sptep & PT64_BASE_ADDR_MASK);
                if (child->role.access == direct_access)
                        return;
 
@@ -2678,7 +2678,7 @@ static bool mmu_page_zap_pte(struct kvm *kvm, struct kvm_mmu_page *sp,
                        if (is_large_pte(pte))
                                --kvm->stat.lpages;
                } else {
-                       child = page_header(pte & PT64_BASE_ADDR_MASK);
+                       child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
                        drop_parent_pte(child, spte);
                }
                return true;
@@ -3110,7 +3110,7 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, u64 *sptep,
                        struct kvm_mmu_page *child;
                        u64 pte = *sptep;
 
-                       child = page_header(pte & PT64_BASE_ADDR_MASK);
+                       child = to_shadow_page(pte & PT64_BASE_ADDR_MASK);
                        drop_parent_pte(child, sptep);
                        flush = true;
                } else if (pfn != spte_to_pfn(*sptep)) {
@@ -3615,7 +3615,7 @@ static void mmu_free_root_page(struct kvm *kvm, hpa_t *root_hpa,
        if (!VALID_PAGE(*root_hpa))
                return;
 
-       sp = page_header(*root_hpa & PT64_BASE_ADDR_MASK);
+       sp = to_shadow_page(*root_hpa & PT64_BASE_ADDR_MASK);
        --sp->root_count;
        if (!sp->root_count && sp->role.invalid)
                kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
@@ -3845,7 +3845,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 
        if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
                hpa_t root = vcpu->arch.mmu->root_hpa;
-               sp = page_header(root);
+               sp = to_shadow_page(root);
 
                /*
                 * Even if another CPU was marking the SP as unsync-ed
@@ -3879,7 +3879,7 @@ void kvm_mmu_sync_roots(struct kvm_vcpu *vcpu)
 
                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
-                       sp = page_header(root);
+                       sp = to_shadow_page(root);
                        mmu_sync_children(vcpu, sp);
                }
        }
@@ -4235,8 +4235,8 @@ static inline bool is_root_usable(struct kvm_mmu_root_info *root, gpa_t pgd,
                                  union kvm_mmu_page_role role)
 {
        return (role.direct || pgd == root->pgd) &&
-              VALID_PAGE(root->hpa) && page_header(root->hpa) &&
-              role.word == page_header(root->hpa)->role.word;
+              VALID_PAGE(root->hpa) && to_shadow_page(root->hpa) &&
+              role.word == to_shadow_page(root->hpa)->role.word;
 }
 
 /*
@@ -4321,7 +4321,7 @@ static void __kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd,
         */
        vcpu_clear_mmio_info(vcpu, MMIO_GVA_ANY);
 
-       __clear_sp_write_flooding_count(page_header(vcpu->arch.mmu->root_hpa));
+       __clear_sp_write_flooding_count(to_shadow_page(vcpu->arch.mmu->root_hpa));
 }
 
 void kvm_mmu_new_pgd(struct kvm_vcpu *vcpu, gpa_t new_pgd, bool skip_tlb_flush,
diff --git a/arch/x86/kvm/mmu/mmu_audit.c b/arch/x86/kvm/mmu/mmu_audit.c
index 6ba703d3497f..c8d51a37e2ce 100644
--- a/arch/x86/kvm/mmu/mmu_audit.c
+++ b/arch/x86/kvm/mmu/mmu_audit.c
@@ -45,7 +45,7 @@ static void __mmu_spte_walk(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
                      !is_last_spte(ent[i], level)) {
                        struct kvm_mmu_page *child;
 
-                       child = page_header(ent[i] & PT64_BASE_ADDR_MASK);
+                       child = to_shadow_page(ent[i] & PT64_BASE_ADDR_MASK);
                        __mmu_spte_walk(vcpu, child, fn, level - 1);
                }
        }
@@ -62,7 +62,7 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
        if (vcpu->arch.mmu->root_level >= PT64_ROOT_4LEVEL) {
                hpa_t root = vcpu->arch.mmu->root_hpa;
 
-               sp = page_header(root);
+               sp = to_shadow_page(root);
                __mmu_spte_walk(vcpu, sp, fn, vcpu->arch.mmu->root_level);
                return;
        }
@@ -72,7 +72,7 @@ static void mmu_spte_walk(struct kvm_vcpu *vcpu, inspect_spte_fn fn)
 
                if (root && VALID_PAGE(root)) {
                        root &= PT64_BASE_ADDR_MASK;
-                       sp = page_header(root);
+                       sp = to_shadow_page(root);
                        __mmu_spte_walk(vcpu, sp, fn, 2);
                }
        }
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 6371bf1d0b1c..3acf3b8eb469 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -43,7 +43,7 @@ struct kvm_mmu_page {
        atomic_t write_flooding_count;
 };
 
-static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
+static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
 {
        struct page *page = pfn_to_page(shadow_page >> PAGE_SHIFT);
 
@@ -52,7 +52,7 @@ static inline struct kvm_mmu_page *page_header(hpa_t shadow_page)
 
 static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
 {
-       return page_header(__pa(sptep));
+       return to_shadow_page(__pa(sptep));
 }
 
 void kvm_mmu_gfn_disallow_lpage(struct kvm_memory_slot *slot, gfn_t gfn);
-- 
2.26.0
