Author: alc
Date: Fri Dec 17 22:41:22 2010
New Revision: 216511
URL: http://svn.freebsd.org/changeset/base/216511

Log:
  Implement and use a single optimized function for unholding a set of pages.
  
  Reviewed by:  kib@

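  For context, a minimal sketch of the calling pattern this commit consolidates,
  assuming a caller that holds an array of held pages.  The wrapper names below
  are hypothetical and not from the tree; vm_page_lock(), vm_page_unhold(),
  vm_page_unlock(), and vm_page_unhold_pages() are the routines that appear in
  the diff.

  /* Sketch only: the headers and wrapper names are illustrative. */
  #include <sys/param.h>
  #include <sys/lock.h>
  #include <sys/mutex.h>
  #include <vm/vm.h>
  #include <vm/vm_page.h>

  /* Old pattern: each call site locked, unheld, and unlocked every page. */
  static void
  unhold_page_run_old(vm_page_t *ma, int count)
  {
          int i;

          for (i = 0; i < count; i++) {
                  vm_page_lock(ma[i]);
                  vm_page_unhold(ma[i]);
                  vm_page_unlock(ma[i]);
          }
  }

  /* New pattern: one call; the loop now lives in vm_page_unhold_pages(). */
  static void
  unhold_page_run_new(vm_page_t *ma, int count)
  {

          vm_page_unhold_pages(ma, count);
  }

  The new routine also caches the page lock pointer, so a run of consecutive
  pages that hash to the same lock is handled under a single lock/unlock pair
  (see the vm_page.c hunk below).
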
Modified:
  head/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c
  head/sys/dev/cxgb/ulp/tom/cxgb_ddp.c
  head/sys/dev/cxgb/ulp/tom/cxgb_vm.c
  head/sys/dev/cxgb/ulp/tom/cxgb_vm.h
  head/sys/kern/sys_pipe.c
  head/sys/kern/vfs_bio.c
  head/sys/vm/vm_page.c
  head/sys/vm/vm_page.h

Modified: head/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c
==============================================================================
--- head/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c Fri Dec 17 22:18:09 2010        (r216510)
+++ head/sys/dev/cxgb/ulp/tom/cxgb_cpl_socket.c Fri Dec 17 22:41:22 2010        (r216511)
@@ -454,7 +454,7 @@ sendmore:
        while (uiotmp.uio_resid > 0) {
                rv = cxgb_vm_page_to_miov(toep, &uiotmp, &m);
                if (rv) {
-                       vm_fault_unhold_pages(toep->tp_pages, count);
+                       vm_page_unhold_pages(toep->tp_pages, count);
                        return (rv);
                }
                uio->uio_resid -= m->m_pkthdr.len;
@@ -469,7 +469,7 @@ sendmore:
         * 
         */
        cxgb_wait_dma_completion(toep);
-       vm_fault_unhold_pages(toep->tp_pages, count);
+       vm_page_unhold_pages(toep->tp_pages, count);
        /*
         * If there is more data to send adjust local copy of iov
         * to point to the start

Modified: head/sys/dev/cxgb/ulp/tom/cxgb_ddp.c
==============================================================================
--- head/sys/dev/cxgb/ulp/tom/cxgb_ddp.c        Fri Dec 17 22:18:09 2010        (r216510)
+++ head/sys/dev/cxgb/ulp/tom/cxgb_ddp.c        Fri Dec 17 22:41:22 2010        (r216511)
@@ -175,7 +175,7 @@ different_gl:
        *newgl = p;
        return (0);
 unpin:
-       vm_fault_unhold_pages(p->dgl_pages, npages);
+       vm_page_unhold_pages(p->dgl_pages, npages);
 
 free_gl:
        
@@ -208,7 +208,7 @@ ddp_gl_free_pages(struct ddp_gather_list
        /*
         * XXX mark pages as dirty before unholding 
         */
-       vm_fault_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
+       vm_page_unhold_pages(gl->dgl_pages, gl->dgl_nelem);
 }
 
 void

Modified: head/sys/dev/cxgb/ulp/tom/cxgb_vm.c
==============================================================================
--- head/sys/dev/cxgb/ulp/tom/cxgb_vm.c Fri Dec 17 22:18:09 2010        (r216510)
+++ head/sys/dev/cxgb/ulp/tom/cxgb_vm.c Fri Dec 17 22:41:22 2010        (r216511)
@@ -150,16 +150,3 @@ error:     
                }
        return (EFAULT);
 }
-
-void
-vm_fault_unhold_pages(vm_page_t *mp, int count)
-{
-
-       KASSERT(count >= 0, ("negative count %d", count));
-       while (count--) {
-               vm_page_lock(*mp);
-               vm_page_unhold(*mp);
-               vm_page_unlock(*mp);
-               mp++;
-       }
-}

Modified: head/sys/dev/cxgb/ulp/tom/cxgb_vm.h
==============================================================================
--- head/sys/dev/cxgb/ulp/tom/cxgb_vm.h Fri Dec 17 22:18:09 2010        (r216510)
+++ head/sys/dev/cxgb/ulp/tom/cxgb_vm.h Fri Dec 17 22:41:22 2010        (r216511)
@@ -34,6 +34,5 @@ $FreeBSD$
 
 int vm_fault_hold_user_pages(vm_map_t map, vm_offset_t addr,
     vm_page_t *mp, int count, vm_prot_t prot);
-void vm_fault_unhold_pages(vm_page_t *mp, int count);
 
 #endif

Modified: head/sys/kern/sys_pipe.c
==============================================================================
--- head/sys/kern/sys_pipe.c    Fri Dec 17 22:18:09 2010        (r216510)
+++ head/sys/kern/sys_pipe.c    Fri Dec 17 22:41:22 2010        (r216511)
@@ -749,7 +749,7 @@ pipe_build_write_buffer(wpipe, uio)
 {
        pmap_t pmap;
        u_int size;
-       int i, j;
+       int i;
        vm_offset_t addr, endaddr;
 
        PIPE_LOCK_ASSERT(wpipe, MA_NOTOWNED);
@@ -771,11 +771,7 @@ pipe_build_write_buffer(wpipe, uio)
                 */
        race:
                if (vm_fault_quick((caddr_t)addr, VM_PROT_READ) < 0) {
-                       for (j = 0; j < i; j++) {
-                               vm_page_lock(wpipe->pipe_map.ms[j]);
-                               vm_page_unhold(wpipe->pipe_map.ms[j]);
-                               vm_page_unlock(wpipe->pipe_map.ms[j]);
-                       }
+                       vm_page_unhold_pages(wpipe->pipe_map.ms, i);
                        return (EFAULT);
                }
                wpipe->pipe_map.ms[i] = pmap_extract_and_hold(pmap, addr,
@@ -812,14 +808,9 @@ static void
 pipe_destroy_write_buffer(wpipe)
        struct pipe *wpipe;
 {
-       int i;
 
        PIPE_LOCK_ASSERT(wpipe, MA_OWNED);
-       for (i = 0; i < wpipe->pipe_map.npages; i++) {
-               vm_page_lock(wpipe->pipe_map.ms[i]);
-               vm_page_unhold(wpipe->pipe_map.ms[i]);
-               vm_page_unlock(wpipe->pipe_map.ms[i]);
-       }
+       vm_page_unhold_pages(wpipe->pipe_map.ms, wpipe->pipe_map.npages);
        wpipe->pipe_map.npages = 0;
 }
 

Modified: head/sys/kern/vfs_bio.c
==============================================================================
--- head/sys/kern/vfs_bio.c     Fri Dec 17 22:18:09 2010        (r216510)
+++ head/sys/kern/vfs_bio.c     Fri Dec 17 22:41:22 2010        (r216511)
@@ -3911,16 +3911,11 @@ retry:
 void
 vunmapbuf(struct buf *bp)
 {
-       int pidx;
        int npages;
 
        npages = bp->b_npages;
        pmap_qremove(trunc_page((vm_offset_t)bp->b_data), npages);
-       for (pidx = 0; pidx < npages; pidx++) {
-               vm_page_lock(bp->b_pages[pidx]);
-               vm_page_unhold(bp->b_pages[pidx]);
-               vm_page_unlock(bp->b_pages[pidx]);
-       }
+       vm_page_unhold_pages(bp->b_pages, npages);
        
        bp->b_data = bp->b_saveaddr;
 }

Modified: head/sys/vm/vm_page.c
==============================================================================
--- head/sys/vm/vm_page.c       Fri Dec 17 22:18:09 2010        (r216510)
+++ head/sys/vm/vm_page.c       Fri Dec 17 22:41:22 2010        (r216511)
@@ -601,6 +601,35 @@ vm_page_unhold(vm_page_t mem)
 }
 
 /*
+ *     vm_page_unhold_pages:
+ *
+ *     Unhold each of the pages that is referenced by the given array.
+ */ 
+void
+vm_page_unhold_pages(vm_page_t *ma, int count)
+{
+       struct mtx *mtx, *new_mtx;
+
+       mtx = NULL;
+       for (; count != 0; count--) {
+               /*
+                * Avoid releasing and reacquiring the same page lock.
+                */
+               new_mtx = vm_page_lockptr(*ma);
+               if (mtx != new_mtx) {
+                       if (mtx != NULL)
+                               mtx_unlock(mtx);
+                       mtx = new_mtx;
+                       mtx_lock(mtx);
+               }
+               vm_page_unhold(*ma);
+               ma++;
+       }
+       if (mtx != NULL)
+               mtx_unlock(mtx);
+}
+
+/*
  *     vm_page_free:
  *
  *     Free a page.

Modified: head/sys/vm/vm_page.h
==============================================================================
--- head/sys/vm/vm_page.h       Fri Dec 17 22:18:09 2010        (r216510)
+++ head/sys/vm/vm_page.h       Fri Dec 17 22:41:22 2010        (r216511)
@@ -364,6 +364,7 @@ void vm_page_set_valid(vm_page_t m, int 
 void vm_page_sleep(vm_page_t m, const char *msg);
 vm_page_t vm_page_splay(vm_pindex_t, vm_page_t);
 vm_offset_t vm_page_startup(vm_offset_t vaddr);
+void vm_page_unhold_pages(vm_page_t *ma, int count);
 void vm_page_unwire (vm_page_t, int);
 void vm_page_wire (vm_page_t);
 void vm_page_set_validclean (vm_page_t, int, int);