Move kvm_mmu_available_pages() from mmu.h to mmu.c; it has a single
caller and has no business being exposed via mmu.h.

Signed-off-by: Sean Christopherson <sean.j.christopher...@intel.com>
---
 arch/x86/kvm/mmu.h     | 9 ---------
 arch/x86/kvm/mmu/mmu.c | 9 +++++++++
 2 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index 0ad06bfe2c2c..d46944488e72 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -64,15 +64,6 @@ bool kvm_can_do_async_pf(struct kvm_vcpu *vcpu);
 int kvm_handle_page_fault(struct kvm_vcpu *vcpu, u64 error_code,
                                u64 fault_address, char *insn, int insn_len);
 
-static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
-{
-       if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
-               return kvm->arch.n_max_mmu_pages -
-                       kvm->arch.n_used_mmu_pages;
-
-       return 0;
-}
-
 static inline int kvm_mmu_reload(struct kvm_vcpu *vcpu)
 {
        if (likely(vcpu->arch.mmu->root_hpa != INVALID_PAGE))
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index fdd05c233308..1b4d45b8f462 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -2825,6 +2825,15 @@ static bool prepare_zap_oldest_mmu_page(struct kvm *kvm,
        return kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
 }
 
+static inline unsigned long kvm_mmu_available_pages(struct kvm *kvm)
+{
+       if (kvm->arch.n_max_mmu_pages > kvm->arch.n_used_mmu_pages)
+               return kvm->arch.n_max_mmu_pages -
+                       kvm->arch.n_used_mmu_pages;
+
+       return 0;
+}
+
 static int make_mmu_pages_available(struct kvm_vcpu *vcpu)
 {
        LIST_HEAD(invalid_list);
-- 
2.26.0
