Make the last few changes necessary to enable the TDP MMU to handle page
faults in parallel while holding the mmu_lock in read mode.

Reviewed-by: Peter Feiner <pfei...@google.com>
Signed-off-by: Ben Gardon <bgar...@google.com>
---
 arch/x86/kvm/mmu/mmu.c | 12 ++++++++++--
 1 file changed, 10 insertions(+), 2 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index b4d6709c240e..3d181a2b2485 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3724,7 +3724,12 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                return r;
 
        r = RET_PF_RETRY;
-       write_lock(&vcpu->kvm->mmu_lock);
+
+       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+               read_lock(&vcpu->kvm->mmu_lock);
+       else
+               write_lock(&vcpu->kvm->mmu_lock);
+
        if (mmu_notifier_retry(vcpu->kvm, mmu_seq))
                goto out_unlock;
        r = make_mmu_pages_available(vcpu);
@@ -3739,7 +3744,10 @@ static int direct_page_fault(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                                 prefault, is_tdp);
 
 out_unlock:
-       write_unlock(&vcpu->kvm->mmu_lock);
+       if (is_tdp_mmu_root(vcpu->kvm, vcpu->arch.mmu->root_hpa))
+               read_unlock(&vcpu->kvm->mmu_lock);
+       else
+               write_unlock(&vcpu->kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
        return r;
 }
-- 
2.30.0.365.g02bc693789-goog

Reply via email to