The non-RCU path costs us two atomic operations on every lockless
shadow walk, as well as extra code complexity.  Drop the reader
counter and always defer freeing of zapped shadow pages to RCU.

Signed-off-by: Avi Kivity <a...@redhat.com>
---
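Not part of the patch -- an illustrative sketch of the contract the
change relies on, reusing identifiers from the existing mmu.c code;
the sequence below is a simplification, not the literal implementation:

    /* Reader: a lockless walk is now a plain RCU read-side section. */
    rcu_read_lock();
    for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
            /* shadow pages seen here cannot be freed underneath us */;
    rcu_read_unlock();

    /*
     * Writer: kvm_mmu_commit_zap_page() never frees pages directly any
     * more; it always defers the free past a grace period, so readers
     * stay safe without the reader_counter bookkeeping.
     */
    kvm_flush_remote_tlbs(kvm);
    kvm_mmu_isolate_pages(invalid_list);
    call_rcu(&sp->rcu, free_pages_rcu);
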
 arch/x86/include/asm/kvm_host.h |    2 --
 arch/x86/kvm/mmu.c              |   46 ++++++++-------------------------------
 2 files changed, 9 insertions(+), 39 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index f624ca7..b885445 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -536,8 +536,6 @@ struct kvm_arch {
        u64 hv_guest_os_id;
        u64 hv_hypercall;
 
-       atomic_t reader_counter;
-
        #ifdef CONFIG_KVM_MMU_AUDIT
        int audit_point;
        #endif
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 29ad6f9..c10f60b 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -549,23 +549,6 @@ static u64 mmu_spte_get_lockless(u64 *sptep)
        return __get_spte_lockless(sptep);
 }
 
-static void walk_shadow_page_lockless_begin(struct kvm_vcpu *vcpu)
-{
-       rcu_read_lock();
-       atomic_inc(&vcpu->kvm->arch.reader_counter);
-
-       /* Increase the counter before walking shadow page table */
-       smp_mb__after_atomic_inc();
-}
-
-static void walk_shadow_page_lockless_end(struct kvm_vcpu *vcpu)
-{
-       /* Decrease the counter after walking shadow page table finished */
-       smp_mb__before_atomic_dec();
-       atomic_dec(&vcpu->kvm->arch.reader_counter);
-       rcu_read_unlock();
-}
-
 static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
                                  struct kmem_cache *base_cache, int min)
 {
@@ -2023,23 +2006,12 @@ static void kvm_mmu_commit_zap_page(struct kvm *kvm,
 
        kvm_flush_remote_tlbs(kvm);
 
-       if (atomic_read(&kvm->arch.reader_counter)) {
-               kvm_mmu_isolate_pages(invalid_list);
-               sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
-               list_del_init(invalid_list);
-
-               trace_kvm_mmu_delay_free_pages(sp);
-               call_rcu(&sp->rcu, free_pages_rcu);
-               return;
-       }
-
-       do {
-               sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
-               WARN_ON(!sp->role.invalid || sp->root_count);
-               kvm_mmu_isolate_page(sp);
-               kvm_mmu_free_page(sp);
-       } while (!list_empty(invalid_list));
+       kvm_mmu_isolate_pages(invalid_list);
+       sp = list_first_entry(invalid_list, struct kvm_mmu_page, link);
+       list_del_init(invalid_list);
 
+       trace_kvm_mmu_delay_free_pages(sp);
+       call_rcu(&sp->rcu, free_pages_rcu);
 }
 
 /*
@@ -2976,11 +2948,11 @@ static u64 walk_shadow_page_get_mmio_spte(struct kvm_vcpu *vcpu, u64 addr)
        struct kvm_shadow_walk_iterator iterator;
        u64 spte = 0ull;
 
-       walk_shadow_page_lockless_begin(vcpu);
+       rcu_read_lock();
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte)
                if (!is_shadow_present_pte(spte))
                        break;
-       walk_shadow_page_lockless_end(vcpu);
+       rcu_read_unlock();
 
        return spte;
 }
@@ -4060,14 +4032,14 @@ int kvm_mmu_get_spte_hierarchy(struct kvm_vcpu *vcpu, u64 addr, u64 sptes[4])
        u64 spte;
        int nr_sptes = 0;
 
-       walk_shadow_page_lockless_begin(vcpu);
+       rcu_read_lock();
        for_each_shadow_entry_lockless(vcpu, addr, iterator, spte) {
                sptes[iterator.level-1] = spte;
                nr_sptes++;
                if (!is_shadow_present_pte(spte))
                        break;
        }
-       walk_shadow_page_lockless_end(vcpu);
+       rcu_read_unlock();
 
        return nr_sptes;
 }
-- 
1.7.10
