On 02/02/21 19:57, Ben Gardon wrote:
@@ -720,7 +790,8 @@ int kvm_tdp_mmu_map(struct kvm_vcpu *vcpu, gpa_t gpa, u32 error_code,
                 */
                if (is_shadow_present_pte(iter.old_spte) &&
                    is_large_pte(iter.old_spte)) {
-                       tdp_mmu_set_spte(vcpu->kvm, &iter, 0);
+                       if (!tdp_mmu_set_spte_atomic(vcpu->kvm, &iter, 0))
+                               break;
                        kvm_flush_remote_tlbs_with_address(vcpu->kvm, iter.gfn,
                                        KVM_PAGES_PER_HPAGE(iter.level));

                        /*
                         * The iter must explicitly re-read the spte here
                         * because the new value informs the !present
                         * path below.
                         */
                        iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
                }

                if (!is_shadow_present_pte(iter.old_spte)) {

Would it be easier to reason about this code by making it retry, like:

retry:
                if (is_shadow_present_pte(iter.old_spte)) {
                        if (is_large_pte(iter.old_spte)) {
                                if (!tdp_mmu_zap_spte_atomic(vcpu->kvm, &iter))
                                        break;

                                /*
                                 * The iter must explicitly re-read the SPTE because
                                 * the atomic cmpxchg failed.
                                 */
                                iter.old_spte = READ_ONCE(*rcu_dereference(iter.sptep));
                                goto retry;
                        }
                } else {
                        ...
                }

?
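
Here tdp_mmu_zap_spte_atomic() would be a new helper; a rough sketch of what it
could look like, assuming it simply wraps the tdp_mmu_set_spte_atomic() call and
the remote TLB flush from the hunk above:

static bool tdp_mmu_zap_spte_atomic(struct kvm *kvm, struct tdp_iter *iter)
{
        /*
         * Zap the SPTE with a cmpxchg; if another thread changed it in
         * the meantime, tell the caller to re-read it and retry.
         */
        if (!tdp_mmu_set_spte_atomic(kvm, iter, 0))
                return false;

        kvm_flush_remote_tlbs_with_address(kvm, iter->gfn,
                                           KVM_PAGES_PER_HPAGE(iter->level));
        return true;
}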

Paolo
