Mark pages accessed before dropping mmu_lock when faulting in guest memory
so that MIPS can convert to kvm_release_faultin_page() without tripping
its lockdep assertion on mmu_lock being held.

Signed-off-by: Sean Christopherson <sea...@google.com>
---
 arch/mips/kvm/mmu.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index f1e4b618ec6d..69463ab24d97 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -634,10 +634,9 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 
        if (writeable)
                kvm_set_pfn_dirty(pfn);
-
-       spin_unlock(&kvm->mmu_lock);
        kvm_release_pfn_clean(pfn);
-       kvm_set_pfn_accessed(pfn);
+
+       spin_unlock(&kvm->mmu_lock);
 out:
        srcu_read_unlock(&kvm->srcu, srcu_idx);
        return err;
-- 
2.47.0.rc1.288.g06298d1525-goog


Reply via email to