From: Yulei Zhang <yuleixzh...@tencent.com>

While the guest boots up it modifies the memory slots multiple times,
so add a page table remove function to free the pre-pinned memory
according to the memory slot changes.

Signed-off-by: Yulei Zhang <yuleixzh...@tencent.com>
---
 arch/x86/kvm/mmu/mmu.c | 56 ++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 56 insertions(+)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 1609012be67d..539974183653 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -6454,6 +6454,62 @@ int kvm_direct_tdp_populate_page_table(struct kvm *kvm, struct kvm_memory_slot *
        return 0;
 }
 
+static int __kvm_remove_spte(struct kvm *kvm, u64 *addr, gfn_t gfn, int level)
+{
+       int i;
+       int ret = level;
+       bool present = false;
+       kvm_pfn_t pfn;
+       u64 *sptep = (u64 *)__va((*addr) & PT64_BASE_ADDR_MASK);
+       unsigned index = SHADOW_PT_INDEX(gfn << PAGE_SHIFT, level);
+
+       for (i = 0; i < PT64_ENT_PER_PAGE; ++i) {
+               if (is_shadow_present_pte(sptep[i])) {
+                       if (i == index) {
+                               if (!is_last_spte(sptep[i], level)) {
+                                       ret = __kvm_remove_spte(kvm, &sptep[i], gfn, level - 1);
+                                       if (is_shadow_present_pte(sptep[i]))
+                                               return ret;
+                               } else {
+                                       pfn = spte_to_pfn(sptep[i]);
+                                       mmu_spte_clear_track_bits(&sptep[i]);
+                                       kvm_release_pfn_clean(pfn);
+                                       if (present)
+                                               return ret;
+                               }
+                       } else {
+                               if (i > index)
+                                       return ret;
+                               else
+                                       present = true;
+                       }
+               }
+       }
+
+       if (!present) {
+               pfn = spte_to_pfn(*addr);
+               mmu_spte_clear_track_bits(addr);
+               kvm_release_pfn_clean(pfn);
+       }
+       return ret;
+}
+
+void kvm_direct_tdp_remove_page_table(struct kvm *kvm, struct kvm_memory_slot *slot)
+{
+       gfn_t gfn = slot->base_gfn;
+       int host_level;
+
+       if (!kvm->arch.global_root_hpa)
+               return;
+
+       for (gfn = slot->base_gfn;
+               gfn < slot->base_gfn + slot->npages;
+               gfn += KVM_PAGES_PER_HPAGE(host_level))
+               host_level = __kvm_remove_spte(kvm, &(kvm->arch.global_root_hpa), gfn, PT64_ROOT_4LEVEL);
+
+       kvm_flush_remote_tlbs(kvm);
+}
+
 /*
  * Calculate mmu pages needed for kvm.
  */
-- 
2.17.1
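
For context, here is a minimal sketch of how the new removal helper might be
wired into the memslot update path. kvm_arch_commit_memory_region() is the
existing x86 commit hook, but its exact signature varies across kernel
versions, and the KVM_MR_* checks below are an assumption about where this
series would hook the removal in, not something this patch establishes:

/*
 * Hypothetical caller (not part of this patch): when a memory slot is
 * deleted or moved, tear down its portion of the pre-built page table
 * so the pre-pinned pages are released back to the host.
 */
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	/* ... existing commit logic ... */

	if (change == KVM_MR_DELETE || change == KVM_MR_MOVE)
		kvm_direct_tdp_remove_page_table(kvm, old);
}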
