By overlaying the 'rcu' field with 'link', we reduce the structure size
by 16 bytes.  This is safe because a page is removed from all lists
before its RCU callback is queued, so the two fields are never live at
the same time.
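
For illustration only (not part of the patch): a minimal user-space
sketch of the size saving, using stand-in definitions for list_head
and rcu_head (each two machine words, i.e. 16 bytes on x86-64):

	#include <stdio.h>

	/* stand-ins for the kernel types; both 16 bytes on x86-64 */
	struct list_head { struct list_head *next, *prev; };
	struct rcu_head  { struct rcu_head *next;
			   void (*func)(struct rcu_head *); };

	struct page_before {		/* separate fields: 32 bytes */
		struct list_head link;
		struct rcu_head rcu;
	};

	struct page_after {		/* overlaid fields: 16 bytes */
		union {
			struct list_head link;
			struct rcu_head rcu;
		};
	};

	int main(void)
	{
		printf("before: %zu, after: %zu\n",
		       sizeof(struct page_before),
		       sizeof(struct page_after));
		return 0;
	}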

Queueing one call_rcu() per page instead of one per batch is not
strictly necessary, but it helps RCU estimate the amount of pending
work.
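
Again for illustration (the 'struct foo' and helpers below are
hypothetical, not from this patch): the resulting per-object pattern,
where unlinking the object before queueing its callback is what makes
overlaying 'link' and 'rcu' safe:

	#include <linux/list.h>
	#include <linux/rcupdate.h>
	#include <linux/slab.h>

	struct foo {
		union {
			struct list_head link;	/* while on a list */
			struct rcu_head rcu;	/* once queued to RCU */
		};
	};

	static void free_foo_rcu(struct rcu_head *head)
	{
		/* one callback frees exactly one object, so RCU can
		 * count its pending work accurately */
		kfree(container_of(head, struct foo, rcu));
	}

	static void zap_foos(struct list_head *invalid_list)
	{
		struct foo *f;

		while (!list_empty(invalid_list)) {
			f = list_first_entry(invalid_list,
					     struct foo, link);
			list_del(&f->link);	/* 'link' dead from here */
			call_rcu(&f->rcu, free_foo_rcu);
		}
	}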

Signed-off-by: Avi Kivity <a...@redhat.com>
---
 arch/x86/include/asm/kvm_host.h |    7 ++++---
 arch/x86/kvm/mmu.c              |   26 +++++++++-----------------
 2 files changed, 13 insertions(+), 20 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index b885445..ae02ff8 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -208,7 +208,10 @@ union kvm_mmu_page_role {
 };
 
 struct kvm_mmu_page {
-       struct list_head link;
+       union {
+               struct list_head link;
+               struct rcu_head rcu;
+       };
        struct hlist_node hash_link;
 
        /*
@@ -237,8 +240,6 @@ struct kvm_mmu_page {
 #endif
 
        int write_flooding_count;
-
-       struct rcu_head rcu;
 };
 
 struct kvm_pio_request {
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index c10f60b..26257d7 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -1338,7 +1338,6 @@ static void kvm_mmu_isolate_page(struct kvm_mmu_page *sp)
  */
 static void kvm_mmu_free_page(struct kvm_mmu_page *sp)
 {
-       list_del(&sp->link);
        free_page((unsigned long)sp->spt);
        kmem_cache_free(mmu_page_header_cache, sp);
 }
@@ -1980,20 +1979,12 @@ static void kvm_mmu_isolate_pages(struct list_head *invalid_list)
                kvm_mmu_isolate_page(sp);
 }
 
-static void free_pages_rcu(struct rcu_head *head)
+static void free_page_rcu(struct rcu_head *head)
 {
-       struct kvm_mmu_page *next, *sp;
+       struct kvm_mmu_page *sp;
 
        sp = container_of(head, struct kvm_mmu_page, rcu);
-       while (sp) {
-               if (!list_empty(&sp->link))
-                       next = list_first_entry(&sp->link,
-                                     struct kvm_mmu_page, link);
-               else
-                       next = NULL;
-               kvm_mmu_free_page(sp);
-               sp = next;
-       }
+       kvm_mmu_free_page(sp);
 }
 
 static void kvm_mmu_commit_zap_page(struct kvm *kvm,
@@ -2007,11 +1998,12 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
        kvm_flush_remote_tlbs(kvm);
 
        kvm_mmu_isolate_pages(invalid_list);
-       sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
-       list_del_init(invalid_list);
-
-       trace_kvm_mmu_delay_free_pages(sp);
-       call_rcu(&sp->rcu, free_pages_rcu);
+       while (!list_empty(invalid_list)) {
+               sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
+               list_del(&sp->link);
+               trace_kvm_mmu_delay_free_pages(sp);
+               call_rcu(&sp->rcu, free_page_rcu);
+       }
 }
 
 /*
-- 
1.7.10
