I found another race related to some missing locking, this time around
uvm_pageclean().

The diff below fixes the two places in /sys/uvm where the page queue
lock should be taken but currently isn't.  To catch further misuse I
added some assertions and documented the global data structures that
are protected by this lock.

Note that uvm_pagefree() is called by many pmaps, most of the time
without the lock held.  The diff below doesn't fix those callers, which
is why some of the assertions are disabled behind an #if for now.

ok?

Index: uvm/uvm.h
===================================================================
RCS file: /cvs/src/sys/uvm/uvm.h,v
retrieving revision 1.67
diff -u -p -r1.67 uvm.h
--- uvm/uvm.h   6 Dec 2019 08:33:25 -0000       1.67
+++ uvm/uvm.h   18 Nov 2020 23:22:15 -0000
@@ -44,18 +44,20 @@
 /*
  * uvm structure (vm global state: collected in one structure for ease
  * of reference...)
+ *
+ *  Locks used to protect struct members in this file:
+ *     Q       uvm.pageqlock
  */
-
 struct uvm {
        /* vm_page related parameters */
 
        /* vm_page queues */
-       struct pglist page_active;      /* allocated pages, in use */
-       struct pglist page_inactive_swp;/* pages inactive (reclaim or free) */
-       struct pglist page_inactive_obj;/* pages inactive (reclaim or free) */
+       struct pglist page_active;      /* [Q] allocated pages, in use */
+       struct pglist page_inactive_swp;/* [Q] pages inactive (reclaim/free) */
+       struct pglist page_inactive_obj;/* [Q] pages inactive (reclaim/free) */
        /* Lock order: pageqlock, then fpageqlock. */
-       struct mutex pageqlock;         /* lock for active/inactive page q */
-       struct mutex fpageqlock;        /* lock for free page q  + pdaemon */
+       struct mutex pageqlock;         /* [] lock for active/inactive page q */
+       struct mutex fpageqlock;        /* [] lock for free page q  + pdaemon */
        boolean_t page_init_done;       /* TRUE if uvm_page_init() finished */
        struct uvm_pmr_control pmr_control; /* pmemrange data */
 
Index: uvm/uvm_anon.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_anon.c,v
retrieving revision 1.49
diff -u -p -r1.49 uvm_anon.c
--- uvm/uvm_anon.c      4 Jan 2020 16:17:29 -0000       1.49
+++ uvm/uvm_anon.c      18 Nov 2020 23:22:15 -0000
@@ -106,7 +106,9 @@ uvm_anfree_list(struct vm_anon *anon, st
                         * clean page, and put on on pglist
                         * for later freeing.
                         */
+                       uvm_lock_pageq();
                        uvm_pageclean(pg);
+                       uvm_unlock_pageq();
                        TAILQ_INSERT_HEAD(pgl, pg, pageq);
                } else {
                        uvm_lock_pageq();       /* lock out pagedaemon */
Index: uvm/uvm_object.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_object.c,v
retrieving revision 1.17
diff -u -p -r1.17 uvm_object.c
--- uvm/uvm_object.c    21 Oct 2020 09:08:14 -0000      1.17
+++ uvm/uvm_object.c    18 Nov 2020 23:22:15 -0000
@@ -172,7 +172,9 @@ uvm_objfree(struct uvm_object *uobj)
                 * this pg from the uobj we are throwing away
                 */
                atomic_clearbits_int(&pg->pg_flags, PG_TABLED);
+               uvm_lock_pageq();
                uvm_pageclean(pg);
+               uvm_unlock_pageq();
                TAILQ_INSERT_TAIL(&pgl, pg, pageq);
        }
        uvm_pmr_freepageq(&pgl);
Index: uvm/uvm_page.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_page.c,v
retrieving revision 1.150
diff -u -p -r1.150 uvm_page.c
--- uvm/uvm_page.c      22 Sep 2020 14:31:08 -0000      1.150
+++ uvm/uvm_page.c      18 Nov 2020 23:22:15 -0000
@@ -973,6 +973,10 @@ uvm_pageclean(struct vm_page *pg)
 {
        u_int flags_to_clear = 0;
 
+#if all_pmap_are_fixed
+       MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
+#endif
+
 #ifdef DEBUG
        if (pg->uobject == (void *)0xdeadbeef &&
            pg->uanon == (void *)0xdeadbeef) {
@@ -1037,6 +1041,10 @@ uvm_pageclean(struct vm_page *pg)
 void
 uvm_pagefree(struct vm_page *pg)
 {
+#if all_pmap_are_fixed
+       MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
+#endif
+
        uvm_pageclean(pg);
        uvm_pmr_freepages(pg, 1);
 }
@@ -1229,6 +1237,8 @@ uvm_pagelookup(struct uvm_object *obj, v
 void
 uvm_pagewire(struct vm_page *pg)
 {
+       MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
+
        if (pg->wire_count == 0) {
                if (pg->pg_flags & PQ_ACTIVE) {
                        TAILQ_REMOVE(&uvm.page_active, pg, pageq);
@@ -1257,6 +1267,8 @@ uvm_pagewire(struct vm_page *pg)
 void
 uvm_pageunwire(struct vm_page *pg)
 {
+       MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
+
        pg->wire_count--;
        if (pg->wire_count == 0) {
                TAILQ_INSERT_TAIL(&uvm.page_active, pg, pageq);
@@ -1276,6 +1288,8 @@ uvm_pageunwire(struct vm_page *pg)
 void
 uvm_pagedeactivate(struct vm_page *pg)
 {
+       MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
+
        if (pg->pg_flags & PQ_ACTIVE) {
                TAILQ_REMOVE(&uvm.page_active, pg, pageq);
                atomic_clearbits_int(&pg->pg_flags, PQ_ACTIVE);
@@ -1310,6 +1324,8 @@ uvm_pagedeactivate(struct vm_page *pg)
 void
 uvm_pageactivate(struct vm_page *pg)
 {
+       MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
+
        if (pg->pg_flags & PQ_INACTIVE) {
                if (pg->pg_flags & PQ_SWAPBACKED)
                        TAILQ_REMOVE(&uvm.page_inactive_swp, pg, pageq);
Index: uvm/uvm_pdaemon.c
===================================================================
RCS file: /cvs/src/sys/uvm/uvm_pdaemon.c,v
retrieving revision 1.87
diff -u -p -r1.87 uvm_pdaemon.c
--- uvm/uvm_pdaemon.c   29 Sep 2020 11:47:41 -0000      1.87
+++ uvm/uvm_pdaemon.c   18 Nov 2020 23:22:15 -0000
@@ -822,6 +822,8 @@ uvmpd_scan(void)
        struct uvm_object *uobj;
        boolean_t got_it;
 
+       MUTEX_ASSERT_LOCKED(&uvm.pageqlock);
+
        uvmexp.pdrevs++;                /* counter */
        uobj = NULL;
 

Reply via email to