From: Lan Tianyu <tianyu....@microsoft.com>

This patch introduces a TLB flush interface that takes a list of ranges,
using struct kvm_mmu_page as the list entry, and switches
kvm_mmu_commit_zap_page() over to the flush-list function.

Signed-off-by: Lan Tianyu <tianyu....@microsoft.com>
---
 arch/x86/include/asm/kvm_host.h |  7 +++++++
 arch/x86/kvm/mmu.c              | 24 +++++++++++++++++++++++-
 2 files changed, 30 insertions(+), 1 deletion(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 78d2a6714c3b..22dbaa8fba32 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -316,6 +316,12 @@ struct kvm_rmap_head {
 
 struct kvm_mmu_page {
        struct list_head link;
+
+       /*
+        * TLB flush with range list uses struct kvm_mmu_page as the list
+        * entry; all list operations must be done under the mmu_lock.
+        */
+       struct list_head flush_link;
        struct hlist_node hash_link;
        bool unsync;
 
@@ -443,6 +449,7 @@ struct kvm_mmu {
 struct kvm_tlb_range {
        u64 start_gfn;
        u64 pages;
+       struct list_head *flush_list;
 };
 
 enum pmc_type {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 068694fa2371..d3272c5066ea 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -289,6 +289,17 @@ static void kvm_flush_remote_tlbs_with_address(struct kvm *kvm,
 
        range.start_gfn = start_gfn;
        range.pages = pages;
+       range.flush_list = NULL;
+
+       kvm_flush_remote_tlbs_with_range(kvm, &range);
+}
+
+static void kvm_flush_remote_tlbs_with_list(struct kvm *kvm,
+               struct list_head *flush_list)
+{
+       struct kvm_tlb_range range;
+
+       range.flush_list = flush_list;
 
        kvm_flush_remote_tlbs_with_range(kvm, &range);
 }
@@ -2708,6 +2719,7 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
                                    struct list_head *invalid_list)
 {
        struct kvm_mmu_page *sp, *nsp;
+       LIST_HEAD(flush_list);
 
        if (list_empty(invalid_list))
                return;
@@ -2721,7 +2733,17 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
         * In addition, kvm_flush_remote_tlbs waits for all vcpus to exit
         * guest mode and/or lockless shadow page table walks.
         */
-       kvm_flush_remote_tlbs(kvm);
+       if (kvm_available_flush_tlb_with_range()) {
+               list_for_each_entry(sp, invalid_list, link)
+                       if (sp->sptep && is_last_spte(*sp->sptep,
+                           sp->role.level))
+                               list_add(&sp->flush_link, &flush_list);
+
+               if (!list_empty(&flush_list))
+                       kvm_flush_remote_tlbs_with_list(kvm, &flush_list);
+       } else {
+               kvm_flush_remote_tlbs(kvm);
+       }
 
        list_for_each_entry_safe(sp, nsp, invalid_list, link) {
                WARN_ON(!sp->role.invalid || sp->root_count);
-- 
2.14.4

Reply via email to