The branch main has been updated by alc:

URL: https://cgit.FreeBSD.org/src/commit/?id=e41fde3ed71c1e4fce81eac002c9f5b0926e6c49

commit e41fde3ed71c1e4fce81eac002c9f5b0926e6c49
Author:     Alan Cox <a...@freebsd.org>
AuthorDate: 2021-07-04 05:20:42 +0000
Commit:     Alan Cox <a...@freebsd.org>
CommitDate: 2021-07-06 02:07:40 +0000

    On a failed fcmpset, don't pointlessly repeat tests
    
    In a few places, on a failed compare-and-set, both the amd64 pmap and
    the arm64 pmap repeat tests on bits that won't change state while the
    pmap is locked.  Eliminate some of these unnecessary tests.
    
    Reviewed by:    andrew, kib, markj
    MFC after:      1 week
    Differential Revision:  https://reviews.freebsd.org/D31014
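
The difference the patch exploits: atomic_cmpset_long() reports only
success or failure, so a failed attempt forces the caller to reload the
PTE and redo its tests before retrying, while atomic_fcmpset_long()
writes the observed value back through its second argument, letting the
loop retry in place. Under the pmap lock the bits being tested (here
PG_RW) cannot change, so nothing needs re-checking. A minimal
standalone sketch of the pattern, using C11's
atomic_compare_exchange_strong(), which has the same failure contract
as the kernel's fcmpset; the flag values and names below are invented
for illustration and are not the kernel's:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define	PG_RW	0x002u		/* illustrative bits, not the amd64 values */
#define	PG_M	0x040u

static _Atomic uint64_t pte = PG_RW | PG_M | 0x1000u;

/* Clear PG_RW and PG_M; return whether the mapping was dirty. */
static bool
clear_rw(_Atomic uint64_t *p)
{
	uint64_t oldpte = atomic_load(p);

	if ((oldpte & PG_RW) == 0)
		return (false);
	/*
	 * On failure, oldpte is refreshed with the current contents of
	 * *p, so there is no explicit reload and, because (in the
	 * kernel) the pmap lock keeps PG_RW from changing underneath
	 * us, no need to repeat the test above.
	 */
	while (!atomic_compare_exchange_strong(p, &oldpte,
	    oldpte & ~(uint64_t)(PG_RW | PG_M)))
		;			/* the kernel uses cpu_spinwait() */
	return ((oldpte & PG_M) != 0);	/* caller would vm_page_dirty() */
}

int
main(void)
{
	printf("was dirty: %d, pte now %#jx\n", clear_rw(&pte),
	    (uintmax_t)atomic_load(&pte));
	return (0);
}

The only bits that can legitimately change between iterations are the
hardware-managed ones such as PG_M, and the final check still sees the
value the exchange actually replaced, so no modification is lost.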
---
 sys/amd64/amd64/pmap.c | 11 +++++------
 sys/arm64/arm64/pmap.c | 15 ++++++++-------
 2 files changed, 13 insertions(+), 13 deletions(-)

diff --git a/sys/amd64/amd64/pmap.c b/sys/amd64/amd64/pmap.c
index ea017b8a61a8..5e0b6d76ae0a 100644
--- a/sys/amd64/amd64/pmap.c
+++ b/sys/amd64/amd64/pmap.c
@@ -8459,7 +8459,7 @@ pmap_remove_write(vm_page_t m)
        pvh = (m->flags & PG_FICTITIOUS) != 0 ? &pv_dummy :
            pa_to_pvh(VM_PAGE_TO_PHYS(m));
        rw_wlock(lock);
-retry_pv_loop:
+retry:
        TAILQ_FOREACH_SAFE(pv, &pvh->pv_list, pv_next, next_pv) {
                pmap = PV_PMAP(pv);
                if (!PMAP_TRYLOCK(pmap)) {
@@ -8469,7 +8469,7 @@ retry_pv_loop:
                        rw_wlock(lock);
                        if (pvh_gen != pvh->pv_gen) {
                                PMAP_UNLOCK(pmap);
-                               goto retry_pv_loop;
+                               goto retry;
                        }
                }
                PG_RW = pmap_rw_bit(pmap);
@@ -8493,7 +8493,7 @@ retry_pv_loop:
                        if (pvh_gen != pvh->pv_gen ||
                            md_gen != m->md.pv_gen) {
                                PMAP_UNLOCK(pmap);
-                               goto retry_pv_loop;
+                               goto retry;
                        }
                }
                PG_M = pmap_modified_bit(pmap);
@@ -8503,12 +8503,11 @@ retry_pv_loop:
                    ("pmap_remove_write: found a 2mpage in page %p's pv list",
                    m));
                pte = pmap_pde_to_pte(pde, pv->pv_va);
-retry:
                oldpte = *pte;
                if (oldpte & PG_RW) {
-                       if (!atomic_cmpset_long(pte, oldpte, oldpte &
+                       while (!atomic_fcmpset_long(pte, &oldpte, oldpte &
                            ~(PG_RW | PG_M)))
-                               goto retry;
+                               cpu_spinwait();
                        if ((oldpte & PG_M) != 0)
                                vm_page_dirty(m);
                        pmap_invalidate_page(pmap, pv->pv_va);
diff --git a/sys/arm64/arm64/pmap.c b/sys/arm64/arm64/pmap.c
index 79b9d20231aa..bf476490b6be 100644
--- a/sys/arm64/arm64/pmap.c
+++ b/sys/arm64/arm64/pmap.c
@@ -3223,10 +3223,12 @@ pmap_protect_l2(pmap_t pmap, pt_entry_t *l2, vm_offset_t sva, pt_entry_t mask,
         * Return if the L2 entry already has the desired access restrictions
         * in place.
         */
-retry:
        if ((old_l2 & mask) == nbits)
                return;
 
+       while (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
+               cpu_spinwait();
+
        /*
         * When a dirty read/write superpage mapping is write protected,
         * update the dirty field of each of the superpage's constituent 4KB
@@ -3240,9 +3242,6 @@ retry:
                        vm_page_dirty(mt);
        }
 
-       if (!atomic_fcmpset_64(l2, &old_l2, (old_l2 & ~mask) | nbits))
-               goto retry;
-
        /*
         * Since a promotion must break the 4KB page mappings before making
         * the 2MB page mapping, a pmap_invalidate_page() suffices.
@@ -3334,7 +3333,7 @@ pmap_protect(pmap_t pmap, vm_offset_t sva, vm_offset_t eva, vm_prot_t prot)
                for (l3p = pmap_l2_to_l3(l2, sva); sva != va_next; l3p++,
                    sva += L3_SIZE) {
                        l3 = pmap_load(l3p);
-retry:
+
                        /*
                         * Go to the next L3 entry if the current one is
                         * invalid or already has the desired access
@@ -3351,6 +3350,10 @@ retry:
                                continue;
                        }
 
+                       while (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) |
+                           nbits))
+                               cpu_spinwait();
+
                        /*
                         * When a dirty read/write mapping is write protected,
                         * update the page's dirty field.
@@ -3360,8 +3363,6 @@ retry:
                            pmap_pte_dirty(pmap, l3))
                                vm_page_dirty(PHYS_TO_VM_PAGE(l3 & ~ATTR_MASK));
 
-                       if (!atomic_fcmpset_64(l3p, &l3, (l3 & ~mask) | nbits))
-                               goto retry;
                        if (va == va_next)
                                va = sva;
                }
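
In both arm64 hunks the fix is a reordering as much as a loop rewrite:
the fcmpset now runs before the dirty-bit bookkeeping, so on failure
neither the early "(old_l2 & mask) == nbits" return test nor the
vm_page_dirty() pass is repeated; each runs exactly once, the latter on
the value the exchange actually replaced. A simplified standalone
sketch of the reordered pmap_protect_l2() shape, again in C11 with
invented bit values and a stand-in for the dirty pass:

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define	ATTR_SW_DBM	0x004u	/* illustrative "dirty" bit, not arm64's */
#define	ATTR_AP_RO	0x080u	/* illustrative "read-only" bit */

static _Atomic uint64_t l2e = ATTR_SW_DBM | 0x200000u;

static void
mark_dirty(uint64_t old_l2)
{
	/* stands in for the vm_page_dirty() loop over constituent pages */
	printf("superpage %#jx was dirty\n", (uintmax_t)old_l2);
}

static void
protect_entry(_Atomic uint64_t *l2, uint64_t mask, uint64_t nbits)
{
	uint64_t old_l2 = atomic_load(l2);

	/* Return if the desired restrictions are already in place. */
	if ((old_l2 & mask) == nbits)
		return;

	/*
	 * Commit the new protections first.  With the pmap locked,
	 * only hardware-managed bits can change concurrently, so a
	 * failure here does not invalidate the test above.
	 */
	while (!atomic_compare_exchange_strong(l2, &old_l2,
	    (old_l2 & ~mask) | nbits))
		;			/* cpu_spinwait() in the kernel */

	/* Act once, on the value the exchange actually replaced. */
	if ((old_l2 & ATTR_SW_DBM) != 0)
		mark_dirty(old_l2);
}

int
main(void)
{
	protect_entry(&l2e, ATTR_AP_RO | ATTR_SW_DBM, ATTR_AP_RO);
	printf("l2e now %#jx\n", (uintmax_t)atomic_load(&l2e));
	return (0);
}

The old goto-based loop re-entered at the top, redoing both steps
whenever another CPU set a hardware-managed bit between the load and
the fcmpset.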