Split the KVM_REQ_MMU_RELOAD request out of kvm_mmu_prepare_zap_page, so
that the new function, __kvm_mmu_prepare_zap_page, only zaps the shadow
page without requesting KVM_REQ_MMU_RELOAD. Later, we will use it to
batch-free root shadow pages.

Signed-off-by: Xiao Guangrong <xiaoguangr...@linux.vnet.ibm.com>
---
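Note (illustration, not part of this patch): a minimal sketch of the
kind of batch-free user the split enables. zap_root_pages_batch() is a
hypothetical example, not code from this series; it assumes the
existing kvm_mmu_commit_zap_page() and kvm_reload_remote_mmus()
helpers and takes mmu_lock itself.

/*
 * Hypothetical later user of the split: zap every root shadow page,
 * then issue a single KVM_REQ_MMU_RELOAD request for the whole batch
 * instead of one request per zapped page.
 */
static void zap_root_pages_batch(struct kvm *kvm)
{
        struct kvm_mmu_page *sp, *node;
        LIST_HEAD(invalid_list);

        spin_lock(&kvm->mmu_lock);
        list_for_each_entry_safe(sp, node, &kvm->arch.active_mmu_pages,
                                 link)
                if (sp->root_count)
                        __kvm_mmu_prepare_zap_page(kvm, sp, &invalid_list);

        /* One reload request covers every page zapped above. */
        kvm_reload_remote_mmus(kvm);
        kvm_mmu_commit_zap_page(kvm, &invalid_list);
        spin_unlock(&kvm->mmu_lock);
}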
 arch/x86/kvm/mmu.c |   20 +++++++++++++++-----
 1 files changed, 15 insertions(+), 5 deletions(-)

diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 5578c91..e880cdd 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2066,8 +2066,9 @@ static int mmu_zap_unsync_children(struct kvm *kvm,
        return zapped;
 }
 
-static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
-                                   struct list_head *invalid_list)
+static int
+__kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+                          struct list_head *invalid_list)
 {
        int ret;
 
@@ -2088,15 +2089,24 @@ static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
                ret++;
                list_move(&sp->link, invalid_list);
                kvm_mod_used_mmu_pages(kvm, -1);
-       } else {
+       } else
                list_move(&sp->link, &kvm->arch.active_mmu_pages);
-               kvm_reload_remote_mmus(kvm);
-       }
 
        sp->role.invalid = 1;
        return ret;
 }
 
+static int kvm_mmu_prepare_zap_page(struct kvm *kvm, struct kvm_mmu_page *sp,
+                                   struct list_head *invalid_list)
+{
+       int ret = __kvm_mmu_prepare_zap_page(kvm, sp, invalid_list);
+
+       if (sp->root_count)
+               kvm_reload_remote_mmus(kvm);
+
+       return ret;
+}
+
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list)
 {
-- 
1.7.7.6
