Mark pages accessed before dropping mmu_lock when faulting in guest memory
so that LoongArch can convert to kvm_release_faultin_page() without
tripping its lockdep assertion on mmu_lock being held.
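For reference, a minimal sketch of the shape of the assertion the
conversion runs up against (illustrative only; the exact body of
kvm_release_faultin_page() may differ, and the kvm_set_page_dirty() /
kvm_set_page_accessed() calls here are assumed from that series):

	static inline void kvm_release_faultin_page(struct kvm *kvm,
						    struct page *page,
						    bool unused, bool dirty)
	{
		/* Callers must hold mmu_lock unless the page went unused. */
		lockdep_assert_once(lockdep_is_held(&kvm->mmu_lock) || unused);

		if (!page)
			return;

		/* Mark the page dirty if KVM may have written to it. */
		if (dirty)
			kvm_set_page_dirty(page);

		/* Unconditionally mark the page accessed on release. */
		kvm_set_page_accessed(page);
	}

Because kvm_release_pfn_clean() marks the underlying page accessed when
it drops its reference, moving the call inside the mmu_lock critical
section gives the future conversion the ordering it asserts.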

Signed-off-by: Sean Christopherson <sea...@google.com>
---
 arch/loongarch/kvm/mmu.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/arch/loongarch/kvm/mmu.c b/arch/loongarch/kvm/mmu.c
index 52b5c16cf250..230cafa178d7 100644
--- a/arch/loongarch/kvm/mmu.c
+++ b/arch/loongarch/kvm/mmu.c
@@ -902,13 +902,13 @@ static int kvm_map_page(struct kvm_vcpu *vcpu, unsigned long gpa, bool write)
 
        if (writeable)
                kvm_set_pfn_dirty(pfn);
+       kvm_release_pfn_clean(pfn);
 
        spin_unlock(&kvm->mmu_lock);
 
        if (prot_bits & _PAGE_DIRTY)
                mark_page_dirty_in_slot(kvm, memslot, gfn);
 
-       kvm_release_pfn_clean(pfn);
 out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
-- 
2.46.0.rc1.232.g9752f9e123-goog
