On Fri, Nov 04, 2011 at 06:03:39PM +0200, Kostik Belousov wrote:

Below is the KBI patch after the vm_page_bits_t merge is done.
Again, I have not spent time converting all in-tree consumers in
the (potentially) loadable modules to the new KPI until the KPI
itself is agreed upon.
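
For illustration only (this is not part of the patch), a rough sketch of
what a converted module-side consumer would look like; the helper function
and the vm_page_activate() call here are made up for the example:

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/queue.h>
#include <sys/lock.h>
#include <sys/mutex.h>

#include <vm/vm.h>
#include <vm/vm_page.h>

/*
 * Hypothetical module code: mark a fully filled page valid and activate
 * it, going through the accessor and locking KPI instead of touching
 * struct vm_page fields or the page lock mutex directly.
 */
static void
example_page_filled(vm_page_t m)
{

	vm_page_write_valid(m, VM_PAGE_BITS_ALL);
	KASSERT(vm_page_read_dirty(m) == 0,
	    ("example_page_filled: page %p is dirty", m));

	vm_page_lock(m);
	vm_page_activate(m);
	vm_page_unlock(m);
}

When such a file is built as part of a module (KLD_MODULE defined), the
calls above resolve to the out-of-line vm_page_*_func() symbols added in
vm_page.c below; in the static kernel they compile down to the existing
macros and inlines, so code compiled into the kernel pays no extra cost.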

diff --git a/sys/nfsclient/nfs_bio.c b/sys/nfsclient/nfs_bio.c
index 305c189..7264cd1 100644
--- a/sys/nfsclient/nfs_bio.c
+++ b/sys/nfsclient/nfs_bio.c
@@ -128,7 +128,7 @@ nfs_getpages(struct vop_getpages_args *ap)
         * can only occur at the file EOF.
         */
        VM_OBJECT_LOCK(object);
-       if (pages[ap->a_reqpage]->valid != 0) {
+       if (vm_page_read_valid(pages[ap->a_reqpage]) != 0) {
                for (i = 0; i < npages; ++i) {
                        if (i != ap->a_reqpage) {
                                vm_page_lock(pages[i]);
@@ -198,16 +198,16 @@ nfs_getpages(struct vop_getpages_args *ap)
                        /*
                         * Read operation filled an entire page
                         */
-                       m->valid = VM_PAGE_BITS_ALL;
-                       KASSERT(m->dirty == 0,
+                       vm_page_write_valid(m, VM_PAGE_BITS_ALL);
+                       KASSERT(vm_page_read_dirty(m) == 0,
                            ("nfs_getpages: page %p is dirty", m));
                } else if (size > toff) {
                        /*
                         * Read operation filled a partial page.
                         */
-                       m->valid = 0;
+                       vm_page_write_valid(m, 0);
                        vm_page_set_valid(m, 0, size - toff);
-                       KASSERT(m->dirty == 0,
+                       KASSERT(vm_page_read_dirty(m) == 0,
                            ("nfs_getpages: page %p is dirty", m));
                } else {
                        /*
diff --git a/sys/vm/vm_page.c b/sys/vm/vm_page.c
index 389aea5..2f41e70 100644
--- a/sys/vm/vm_page.c
+++ b/sys/vm/vm_page.c
@@ -2677,6 +2677,66 @@ vm_page_test_dirty(vm_page_t m)
                vm_page_dirty(m);
 }
 
+void
+vm_page_lock_func(vm_page_t m, const char *file, int line)
+{
+
+#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
+       _mtx_lock_flags(vm_page_lockptr(m), 0, file, line);
+#else
+       __mtx_lock(vm_page_lockptr(m), 0, file, line);
+#endif
+}
+
+void
+vm_page_unlock_func(vm_page_t m, const char *file, int line)
+{
+
+#if LOCK_DEBUG > 0 || defined(MUTEX_NOINLINE)
+       _mtx_unlock_flags(vm_page_lockptr(m), 0, file, line);
+#else
+       __mtx_unlock(vm_page_lockptr(m), curthread, 0, file, line);
+#endif
+}
+
+int
+vm_page_trylock_func(vm_page_t m, const char *file, int line)
+{
+
+       return (_mtx_trylock(vm_page_lockptr(m), 0, file, line));
+}
+
+void
+vm_page_lock_assert_func(vm_page_t m, int a, const char *file, int line)
+{
+
+#ifdef INVARIANTS
+       _mtx_assert(vm_page_lockptr(m), a, file, line);
+#endif
+}
+
+vm_page_bits_t
+vm_page_read_dirty_func(vm_page_t m)
+{
+
+       return (m->dirty);
+}
+
+vm_page_bits_t
+vm_page_read_valid_func(vm_page_t m)
+{
+
+       return (m->valid);
+}
+
+void
+vm_page_write_valid_func(vm_page_t m, vm_page_bits_t v)
+{
+
+       m->valid = v;
+}
+
+
 int so_zerocp_fullpage = 0;
 
 /*
diff --git a/sys/vm/vm_page.h b/sys/vm/vm_page.h
index 7099b70..4f8f71e 100644
--- a/sys/vm/vm_page.h
+++ b/sys/vm/vm_page.h
@@ -218,12 +218,50 @@ extern struct vpglocks pa_lock[];
 
 #define        PA_LOCK_ASSERT(pa, a)   mtx_assert(PA_LOCKPTR(pa), (a))
 
+#ifdef KLD_MODULE
+#define        vm_page_lock(m)         vm_page_lock_func((m), LOCK_FILE, LOCK_LINE)
+#define        vm_page_unlock(m)       vm_page_unlock_func((m), LOCK_FILE, LOCK_LINE)
+#define        vm_page_trylock(m)      vm_page_trylock_func((m), LOCK_FILE, LOCK_LINE)
+#ifdef INVARIANTS
+#define        vm_page_lock_assert(m, a)       \
+    vm_page_lock_assert_func((m), (a), LOCK_FILE, LOCK_LINE)
+#else
+#define        vm_page_lock_assert(m, a)
+#endif
+
+#define        vm_page_read_dirty(m)   vm_page_read_dirty_func((m))
+#define        vm_page_read_valid(m)   vm_page_read_valid_func((m))
+#define        vm_page_write_valid(m, v)       vm_page_write_valid_func((m), (v))
+
+#else  /* KLD_MODULE */
 #define        vm_page_lockptr(m)      (PA_LOCKPTR(VM_PAGE_TO_PHYS((m))))
 #define        vm_page_lock(m)         mtx_lock(vm_page_lockptr((m)))
 #define        vm_page_unlock(m)       mtx_unlock(vm_page_lockptr((m)))
 #define        vm_page_trylock(m)      mtx_trylock(vm_page_lockptr((m)))
 #define        vm_page_lock_assert(m, a)       mtx_assert(vm_page_lockptr((m)), (a))
 
+static inline vm_page_bits_t
+vm_page_read_dirty(vm_page_t m)
+{
+
+       return (m->dirty);
+}
+
+static inline vm_page_bits_t
+vm_page_read_valid(vm_page_t m)
+{
+
+       return (m->valid);
+}
+
+static inline void
+vm_page_write_valid(vm_page_t m, vm_page_bits_t v)
+{
+
+       m->valid = v;
+}
+#endif
+
 #define        vm_page_queue_free_mtx  vm_page_queue_free_lock.data
 /*
  * These are the flags defined for vm_page.
@@ -403,6 +441,15 @@ void vm_page_cowfault (vm_page_t);
 int vm_page_cowsetup(vm_page_t);
 void vm_page_cowclear (vm_page_t);
 
+void vm_page_lock_func(vm_page_t m, const char *file, int line);
+void vm_page_unlock_func(vm_page_t m, const char *file, int line);
+int vm_page_trylock_func(vm_page_t m, const char *file, int line);
+void vm_page_lock_assert_func(vm_page_t m, int a, const char *file, int line);
+
+vm_page_bits_t vm_page_read_dirty_func(vm_page_t m);
+vm_page_bits_t vm_page_read_valid_func(vm_page_t m);
+void vm_page_write_valid_func(vm_page_t m, vm_page_bits_t v);
+
 #ifdef INVARIANTS
 void vm_page_object_lock_assert(vm_page_t m);
 #define        VM_PAGE_OBJECT_LOCK_ASSERT(m)   vm_page_object_lock_assert(m)
